diff --git a/.gitattributes b/.gitattributes
index 1ef325f1b111266a6b26e0196871bd78baa8c2f3..a6344aac8c09253b3b630fb776ae94478aa0275b 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -8,8 +8,6 @@
 *.h5 filter=lfs diff=lfs merge=lfs -text
 *.joblib filter=lfs diff=lfs merge=lfs -text
 *.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.lz4 filter=lfs diff=lfs merge=lfs -text
-*.mds filter=lfs diff=lfs merge=lfs -text
 *.mlmodel filter=lfs diff=lfs merge=lfs -text
 *.model filter=lfs diff=lfs merge=lfs -text
 *.msgpack filter=lfs diff=lfs merge=lfs -text
@@ -35,25 +33,3 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
-# Audio files - uncompressed
-*.pcm filter=lfs diff=lfs merge=lfs -text
-*.sam filter=lfs diff=lfs merge=lfs -text
-*.raw filter=lfs diff=lfs merge=lfs -text
-# Audio files - compressed
-*.aac filter=lfs diff=lfs merge=lfs -text
-*.flac filter=lfs diff=lfs merge=lfs -text
-*.mp3 filter=lfs diff=lfs merge=lfs -text
-*.ogg filter=lfs diff=lfs merge=lfs -text
-*.wav filter=lfs diff=lfs merge=lfs -text
-# Image files - uncompressed
-*.bmp filter=lfs diff=lfs merge=lfs -text
-*.gif filter=lfs diff=lfs merge=lfs -text
-*.png filter=lfs diff=lfs merge=lfs -text
-*.tiff filter=lfs diff=lfs merge=lfs -text
-# Image files - compressed
-*.jpg filter=lfs diff=lfs merge=lfs -text
-*.jpeg filter=lfs diff=lfs merge=lfs -text
-*.webp filter=lfs diff=lfs merge=lfs -text
-# Video files - compressed
-*.mp4 filter=lfs diff=lfs merge=lfs -text
-*.webm filter=lfs diff=lfs merge=lfs -text
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..2b22675b8f61a2e5c7b2edbad33c102d129a157e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,12 @@
+---
+title: Mft Log
+emoji: 📚
+colorFrom: pink
+colorTo: pink
+sdk: gradio
+sdk_version: 5.49.1
+app_file: app.py
+pinned: false
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log
new file mode 100644
index 0000000000000000000000000000000000000000..cc5f8d49c49c3f3adb72f6592a24702c563e0ce2
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log
@@ -0,0 +1,236 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log
+Timestamp: 2025-10-10 05:57:57
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`.
+ 0it [00:00, ?it/s] 0it [00:00, ?it/s]
+[2025-10-10 05:58:00,052] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:03,277] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-10 05:58:03,278] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.3 --temperature_mlp_text 0.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.3 --temperature_mlp_vision 0.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
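
[Editor's note] The `--world_info` blob in the launcher command above is just base64-encoded JSON mapping each host to its GPU ranks; it matches the WORLD INFO DICT the launcher prints a few lines below. A minimal sketch in plain Python that decodes it, plus the global batch size these flags imply (assuming the usual per-device batch × gradient accumulation × world size convention; nothing here is TinyLLaVA-specific):

    import base64
    import json

    # --world_info payload copied from the deepspeed.launcher command above.
    world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
    print(json.loads(base64.b64decode(world_info)))
    # -> {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}

    # Effective global batch size implied by the flags:
    # per_device_train_batch_size * gradient_accumulation_steps * world_size
    print(4 * 4 * 8)  # 128
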
+ import pynvml # type: ignore[import]
+[2025-10-10 05:58:05,879] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:06,963] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 05:58:06,963] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 05:58:06,963] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 05:58:06,963] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 05:58:06,963] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 05:58:06,963] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 05:58:06,963] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 05:58:06,966] [INFO] [launch.py:253:main] process 548567 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:58:06,968] [INFO] [launch.py:253:main] process 548568 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:58:06,970] [INFO] [launch.py:253:main] process 548569 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:58:06,972] [INFO] [launch.py:253:main] process 548570 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:58:06,973] [INFO] [launch.py:253:main] process 548571 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:58:06,975] [INFO] [launch.py:253:main] process 548572 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:58:06,977] [INFO] [launch.py:253:main] process 548573 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:58:06,979] [INFO] [launch.py:253:main] process 548574 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 05:58:13,755] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:13,756] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:13,756] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:13,763] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:13,775] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:13,775] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:13,804] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:13,838] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:58:14,271] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 05:58:14,271] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.3, 'temperature_mlp': 0.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.3, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.3,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.3,
+    "temperature_mlp": 0.3,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
+    resolved_file = hf_hub_download(
+  File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
+    validate_repo_id(arg_value)
+  File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
+    raise HFValidationError(
+huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
+    train()
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
+    model = training_recipe.load(model, model_args)
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
+    model.load_llm(**model_args['llm'])
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
+    self.language_model = self.language_model.from_pretrained(
+  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
+    resolved_config_file = cached_file(
+  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
+    raise EnvironmentError(
+OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
+[2025-10-10 05:58:46,032] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548567
+[2025-10-10 05:58:46,418] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548568
+[2025-10-10 05:58:46,881] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548569
+[2025-10-10 05:58:47,265] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548570
+[2025-10-10 05:58:47,683] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548571
+[2025-10-10 05:58:48,100] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548572
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file
+    resolved_file = hf_hub_download(
+  File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn
+    validate_repo_id(arg_value)
+  File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id
+    raise HFValidationError(
+huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed.
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
+    train()
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
+    model = training_recipe.load(model, model_args)
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
+    model.load_llm(**model_args['llm'])
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
+    self.language_model = self.language_model.from_pretrained(
+  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
+[2025-10-10 05:58:48,477] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548573
+    resolved_config_file = cached_file(
+  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
+    raise EnvironmentError(
+OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
+[2025-10-10 05:58:48,893] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 548574
+[2025-10-10 05:58:48,894] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055757.log
+Timestamp: 2025-10-10 05:58:50
+=====================================
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055857.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055857.log
new file mode 100644
index 0000000000000000000000000000000000000000..004ee551a82e52842e73d5aa5f796725073a0080
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055857.log
@@ -0,0 +1,1167 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055857.log
+Timestamp: 2025-10-10 05:58:57
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 05:58:59,883] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:02,536] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-10 05:59:02,537] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.3 --temperature_mlp_text 0.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.3 --temperature_mlp_vision 0.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
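
[Editor's note] This retry differs from the failed run above only in the two checkpoint paths: --pretrained_model_path and --output_dir now read /nfs/ywang29/TinyLLaVA/... instead of /nfs/ywang29/tinyLLaVA/.... That strongly suggests the first run's OSError was a case typo on a case-sensitive filesystem: the pretrain directory (with /language_model appended) was not found locally, so `from_pretrained` fell back to treating the string as a Hub repo id, which huggingface_hub rejected with the HFValidationError seen above. A minimal sketch of a guard that fails fast with a clearer message; `load_local_checkpoint` is a hypothetical helper for illustration, not part of TinyLLaVA:

    import os
    from transformers import AutoModelForCausalLM

    def load_local_checkpoint(path: str):
        # A string that is not an existing local directory falls through to the
        # Hub repo-id code path, producing the HFValidationError / OSError pair
        # seen in the first log. Checking up front gives an actionable error.
        if not os.path.isdir(path):
            raise FileNotFoundError(f"Checkpoint directory not found: {path}")
        return AutoModelForCausalLM.from_pretrained(path)
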
+ import pynvml # type: ignore[import] +[2025-10-10 05:59:05,135] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:59:06,209] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 05:59:06,209] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 05:59:06,210] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 05:59:06,210] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 05:59:06,210] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 05:59:06,210] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 05:59:06,210] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 05:59:06,212] [INFO] [launch.py:253:main] process 550631 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 05:59:06,214] [INFO] [launch.py:253:main] 
+[2025-10-10 05:59:06,214] [INFO] [launch.py:253:main] processes 550634, 550635, 550636, 550637, 550640, 550641 and 550642 spawned with the same command for --local_rank=1 through --local_rank=7 (seven duplicate command lines omitted)
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+(this FutureWarning repeats once per spawned rank; duplicates omitted)
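For readers reconstructing the fan-out condensed above: the eight spawn lines differ only in --local_rank. The following is a minimal sketch of that observable pattern, not DeepSpeed's actual launcher code; the truncated TRAIN_ARGS list is a stand-in for the full argument list logged above.

```python
import shlex
import subprocess

# Sketch of the per-rank fan-out visible in the launch.py:253 lines above:
# eight identical command lines, differing only in --local_rank.
PYTHON = "/opt/conda/envs/tinyllava/bin/python3.10"
TRAIN_ARGS = ["--deepspeed", "./scripts/zero3.json"]  # ...plus the remaining flags logged above

procs = []
for local_rank in range(8):  # num_local_procs=8, one process per GPU
    cmd = [PYTHON, "-u", "tinyllava/train/train.py", f"--local_rank={local_rank}", *TRAIN_ARGS]
    print(shlex.join(cmd))  # the per-rank command line, as logged
    procs.append(subprocess.Popen(cmd))

for p in procs:
    p.wait()
```

The real launcher additionally sets up the environment seen earlier in the log, such as CUDA_VISIBLE_DEVICES and the master address/port.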
+[2025-10-10 05:59:12,968] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:13,377] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:59:13,711] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+(the ds_accelerator, cdb=None, 'Apply masks', and resume_download lines each repeat once per rank; duplicates omitted)
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.3, 'temperature_mlp': 0.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.3, 'mask_type': 'soft', 'backward_type': 'normal'}}
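The dict above selects soft masks with temperature 0.3 for both the LLM and the connector. The actual gating code is not shown in this log; the following is only a sketch of one common formulation of a temperature-scaled soft weight mask. The class name, the sigmoid gate, and the use of init_mean 1.0 (from the launch flags) as the score initializer are all assumptions, not the repository's implementation.

```python
import torch
import torch.nn as nn

class SoftMaskedLinear(nn.Module):
    """Hypothetical sketch of a temperature-scaled soft weight mask.

    Assumption, not the repository's code: 'soft' is read as
    gate = sigmoid(score / temperature), applied elementwise to the weight.
    """

    def __init__(self, linear: nn.Linear, init_mean: float = 1.0, temperature: float = 0.3):
        super().__init__()
        self.linear = linear
        self.temperature = temperature
        # Mask scores initialized at init_mean (1.0 in this run's flags),
        # so gates start near fully open: sigmoid(1.0 / 0.3) ~ 0.97.
        self.score = nn.Parameter(torch.full_like(linear.weight, init_mean))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate = torch.sigmoid(self.score / self.temperature)  # soft gate in (0, 1)
        return nn.functional.linear(x, self.linear.weight * gate, self.linear.bias)
```

A wrapper like this would let the mask scores be trained in place of, or alongside, the wrapped weights; how the run actually combines them is not visible in this log.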
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.3,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.3,
+    "temperature_mlp": 0.3,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
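Given the config above, the connector maps vision_hidden_size 1152 into the LLM's hidden_size 896. In LLaVA-style code, 'mlp2x_gelu' conventionally denotes a two-layer MLP with a GELU between the layers; the sketch below assumes that reading rather than quoting the TinyLLaVA source.

```python
import torch.nn as nn

# Assumed reading of connector_type 'mlp2x_gelu' (LLaVA convention):
# two linear layers with a GELU, projecting SigLIP patch features (1152-d)
# into the Qwen2.5-0.5B embedding space (896-d).
vision_hidden_size = 1152  # from vision_config above
hidden_size = 896          # from text_config above

connector = nn.Sequential(
    nn.Linear(vision_hidden_size, hidden_size),
    nn.GELU(),
    nn.Linear(hidden_size, hidden_size),
)
```

The resampler-related fields in the dump (num_queries, num_resampler_layers, resampler_hidden_size) belong to other connector types and are presumably unused with mlp2x_gelu.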
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+(the three messages above repeat once per rank; duplicates omitted)
+ywang29-vrdb-test2-worker-0:550631:550631 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:550631:550631 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:550631:550631 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:550631:550631 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:550631:550631 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:550631:550631 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO Using network Socket
+(ranks 1-7 emit the same bootstrap, plugin, and Socket-network lines; duplicates omitted)
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO ncclCommInitRank comm 0x560a8467c720 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xe728439617450310 - Init START
+(matching Init START lines follow for ranks 1-7, all sharing commId 0xe728439617450310)
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO NVLS multicast support is not available on dev 0
+(GPUs 0-3 pin to CPU mask ff,ffff0000,00ffffff and GPUs 4-7 to ffffff00,0000ffff,ff000000; NVLS multicast is unavailable on all eight devices)
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO comm 0x560a8467c720 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+(channels 01/24 through 23/24 use the same ring order)
+ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 ... [23] 3/-1/-1->2->1
+ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO P2P Chunksize set to 524288
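The Trees entries are terse. The usual reading of an entry such as 3/-1/-1->2->1 is children / this rank / parent, with -1 meaning none, so every channel here forms the chain 0 -> 1 -> ... -> 7. A small parser illustrating that assumed reading:

```python
import re

def parse_tree_entry(entry: str) -> dict:
    """Parse one NCCL tree entry such as '3/-1/-1->2->1'.

    Reading assumed here: up to three children / this rank / parent,
    with -1 meaning 'none'.
    """
    children, rank, parent = re.fullmatch(
        r"(-?\d+/-?\d+/-?\d+)->(-?\d+)->(-?\d+)", entry
    ).groups()
    return {
        "children": [int(c) for c in children.split("/") if int(c) >= 0],
        "rank": int(rank),
        "parent": int(parent),
    }

print(parse_tree_entry("3/-1/-1->2->1"))
# {'children': [3], 'rank': 2, 'parent': 1} -- rank 2 sits between ranks 1 and 3
```

A pure chain is consistent with the single-node world info above: with nNodes=1 there is no second node for the tree to branch toward.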
+(each of the eight ranks reports an analogous Trees line on all 24 channels, a chain in which rank 0 is the root and rank N's parent is rank N-1, followed by its own 'P2P Chunksize set to 524288' line)
+ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+(the per-channel connection lines that follow all take this form: on each of the 24 channels every rank connects to its ring successor via P2P/CUMEM/read, 0[0] -> 1[1] through 6[6] -> 7[7], with 7[7] wrapping to 0[0])
+ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
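[Editor's note: the surrounding block is typical NCCL_DEBUG=INFO topology output. Each rank first dumps its ring order ("Channel 22/24 : 0 1 2 3 4 5 6 7") and tree parents, then reports, channel by channel, the peer it wires up in the forward ring (0 -> 1 -> ... -> 7 -> 0) and afterwards in the reverse direction, all over the P2P/CUMEM transport. When skimming long logs like this, it can help to collapse the channel lines into a per-edge summary; the following minimal Python sketch is a hypothetical helper (not part of the training code) that parses the "Channel XX/0 : A[a] -> B[b] via ..." lines above into edge/channel counts.]

```python
import re
from collections import defaultdict

# Matches the per-channel edge lines in NCCL_DEBUG=INFO output, e.g.
# "... NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read"
EDGE_RE = re.compile(r"NCCL INFO Channel (\d+)/0 : (\d+)\[\d+\] -> (\d+)\[\d+\] via (\S+)")

def summarize_edges(log_lines):
    """Collapse channel lines into a (src, dst, transport) -> {channel ids} map."""
    edges = defaultdict(set)
    for line in log_lines:
        m = EDGE_RE.search(line)
        if m:
            ch, src, dst, via = m.groups()
            edges[(int(src), int(dst), via)].add(int(ch))
    return edges

if __name__ == "__main__":
    sample = [
        "worker-0:550631:552338 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read",
        "worker-0:550631:552338 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read",
    ]
    for (src, dst, via), chans in sorted(summarize_edges(sample).items()):
        print(f"{src} -> {dst} via {via}: {len(chans)} channels")
```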
+ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:550635:552339 [2] 
NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:550635:552339 [2] NCCL INFO ncclCommInitRank comm 0x559e04fc5c60 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xe728439617450310 - Init COMPLETE +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:550641:552358 [6] NCCL INFO ncclCommInitRank comm 0x5631b34ac820 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xe728439617450310 - Init COMPLETE +ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:550631:552338 [0] NCCL INFO ncclCommInitRank comm 0x560a8467c720 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xe728439617450310 - Init COMPLETE +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:550637:552357 [4] NCCL INFO ncclCommInitRank comm 0x5639328be8a0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xe728439617450310 - Init COMPLETE +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:550640:552360 [5] NCCL INFO ncclCommInitRank comm 0x558d21b356a0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xe728439617450310 - Init COMPLETE +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:550634:552359 [1] NCCL INFO ncclCommInitRank comm 0x55e2da1e88b0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xe728439617450310 - Init COMPLETE +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:550642:552361 [7] NCCL INFO ncclCommInitRank comm 0x55cc0f8e2ca0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xe728439617450310 - Init COMPLETE +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
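[Editor's note: the "TUNER/Plugin: Plugin load returned 11 ... libnccl-net.so: cannot open shared object file" messages above mean NCCL could not dlopen an optional external tuner plugin; as the next message on each rank states, it falls back to the internal tuner, and ncclCommInitRank still reports Init COMPLETE for all eight ranks (nranks 8, one communicator per GPU), so the warning is benign here. To check whether such an optional shared library is even resolvable on a node, a quick probe like this illustrative snippet (not part of the training code) would do.]

```python
import ctypes

# Illustrative probe: try to dlopen the optional NCCL plugin libraries
# named in the log and report whether the dynamic loader can find them.
for name in ("libnccl-net.so", "libnccl-tuner.so"):
    try:
        ctypes.CDLL(name)
        print(f"{name}: loadable")
    except OSError as err:
        print(f"{name}: not loadable ({err})")
```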
+ywang29-vrdb-test2-worker-0:550636:552362 [3] NCCL INFO ncclCommInitRank comm 0x561965201fd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xe728439617450310 - Init COMPLETE +[2025-10-10 05:59:59,734] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[Two verbatim repeats of the preceding "Some weights of Qwen2ForCausalLM were not initialized ..." warning and TRAIN notice, printed by two further ranks, are elided here.]
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores',
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 
'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 
'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 
'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 
'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 06:00:03,316] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
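Editor's note: the SupermaskLinearSparsity_SoftForward_Normal modules above stand in for nn.Linear in the masked submodules; each keeps its dense weight and adds a learnable `scores` tensor of the same shape (these are the `.scores` parameters flagged as newly initialized earlier in this log, and they are initialized around 1.0, which is why every mean below logs as 1.000000). The actual implementation is not shown in the log; the following is only a minimal sketch of a soft-forward supermask linear layer, where the sigmoid masking function and the `init_mean`/`temperature` parameter names are assumptions, not the repository's code.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SupermaskLinearSoft(nn.Linear):
        """Hypothetical soft-forward supermask layer (assumed semantics).

        A learnable `scores` tensor gates each weight entry; the forward
        pass scales the weight by sigmoid(scores / temperature), so with
        scores initialized near init_mean=1.0 the logged score mean is
        ~1.000000 and the initial mask is close to uniform.
        """

        def __init__(self, in_features, out_features, bias=True,
                     init_mean=1.0, temperature=0.3):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # One score per weight entry, initialized around init_mean;
            # the small noise keeps individual entries distinguishable.
            self.scores = nn.Parameter(
                init_mean + 0.01 * torch.randn(out_features, in_features))

        def forward(self, x):
            # Soft mask in (0, 1); lower temperature sharpens it toward 0/1.
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)

Registering the tensor under the attribute name `scores` is what produces parameter names such as `model.layers.0.self_attn.q_proj.scores` in the warnings above.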
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
+    train()
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train
+    data_module = make_supervised_data_module(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module
+    train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__
+    list_data_dict = json.load(open(data_path, "r"))
+FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json'
+(the same traceback is raised on each of the 8 ranks)
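Editor's note: the missing file is looked up under /nfs/ywang29/tinyLLaVA/... (lowercase t), while the code and the pretrain checkpoints in this same log load from /nfs/ywang29/TinyLLaVA/...; on a case-sensitive filesystem that casing difference in the data path alone would explain the crash. A small, hypothetical pre-flight check (not part of the training code) that would fail fast with one readable message instead of a traceback per rank might look like:

    import os

    def check_data_path(data_path: str) -> None:
        """Hypothetical helper: verify the annotation JSON exists before launch."""
        if not os.path.isfile(data_path):
            parent = os.path.dirname(data_path)
            # List a few siblings of the missing path to surface typos such as
            # tinyLLaVA vs. TinyLLaVA in the directory name.
            siblings = sorted(os.listdir(parent)) if os.path.isdir(parent) else []
            raise FileNotFoundError(
                f"data_path does not exist: {data_path!r}; "
                f"nearest existing directory contains: {siblings[:10]}")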
train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in +Traceback (most recent call last): + train() File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json' + data_module = make_supervised_data_module(tokenizer=tokenizer, +train() File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + list_data_dict = json.load(open(data_path, "r")) +list_data_dict = json.load(open(data_path, "r")) +FileNotFoundErrorFileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json': +[Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json' + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json' +Pre-training init connector._connector.0.scores: Mean=1.000005 +Pre-training init connector._connector.2.scores: Mean=0.999970 +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json' +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict 
= json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json' +[2025-10-10 06:00:06,295] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 550631 +[2025-10-10 06:00:06,296] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 550634 +[2025-10-10 06:00:06,297] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 550635 +[2025-10-10 06:00:06,299] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 550636 +[2025-10-10 06:00:06,299] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 550637 +[2025-10-10 06:00:06,300] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 550640 +[2025-10-10 06:00:06,301] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 550641 +[2025-10-10 06:00:06,303] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 550642 +[2025-10-10 06:00:06,304] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with 
return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_055857.log +Timestamp: 2025-10-10 06:00:07 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log new file mode 100644 index 0000000000000000000000000000000000000000..82b7e919d3ec20c6479d356ef92d0fdfddc9f61a --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log +Timestamp: 2025-10-10 06:02:21 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:02:24,596] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:27,325] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:02:27,327] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. 
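The first run above died before training started: every rank tried to `json.load` an annotation file at a path that does not exist on this node, and the launcher then reaped all eight workers. The rerun below switches `--data_path` and `--image_folder` to `/root/dataset`. A fail-fast path check ahead of `json.load` would surface this as a single clear message instead of eight interleaved tracebacks; the sketch below is a hypothetical helper, not part of the TinyLLaVA codebase.

```python
# Hypothetical pre-flight check -- illustrative, not TinyLLaVA source.
# Validating --data_path and --image_folder before workers spawn turns the
# eight interleaved FileNotFoundError tracebacks above into one clear error.
import json
import sys
from pathlib import Path

def load_annotations(data_path: str, image_folder: str) -> list:
    data_file = Path(data_path)
    if not data_file.is_file():
        sys.exit(f"--data_path not found: {data_file} (is the mount present on this node?)")
    if not Path(image_folder).is_dir():
        sys.exit(f"--image_folder not found: {image_folder}")
    with data_file.open("r") as f:
        return json.load(f)

if __name__ == "__main__":
    records = load_annotations(
        "/root/dataset/text_files/llava_v1_5_mix665k.json",  # path used by the rerun below
        "/root/dataset",
    )
    print(f"loaded {len(records)} conversations")
```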
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log new file mode 100644 index 0000000000000000000000000000000000000000..82b7e919d3ec20c6479d356ef92d0fdfddc9f61a --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log +Timestamp: 2025-10-10 06:02:21 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:02:24,596] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:27,325] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:02:27,327] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.3 --temperature_mlp_text 0.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.3 --temperature_mlp_vision 0.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:02:29,898] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:30,930] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:02:30,930] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:02:30,930] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:02:30,930] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:02:30,930] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:02:30,930] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:02:30,930] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:02:30,933] [INFO] [launch.py:253:main] process 554958 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[between 2025-10-10 06:02:30,935 and 06:02:30,947, launch.py:253:main spawned processes 554959, 554960, 554961, 554962, 554963, 554964, 554965 with the identical command for '--local_rank=1' through '--local_rank=7'; the seven duplicated argument lists are collapsed here] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[the pynvml FutureWarning above is emitted once by each of the 8 spawned ranks; the remaining seven copies are collapsed] +[2025-10-10 06:02:38,016] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:38,122] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:38,229] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:38,266] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:38,280] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:38,282] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:38,284] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:38,290] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:38,435] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:02:38,532] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:02:38,636] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:02:38,673] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:02:38,689] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:02:38,695] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:02:38,698] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:02:38,698] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 06:02:38,700] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +[the 'Apply masks' line and the huggingface_hub `resume_download` FutureWarning below are likewise emitted once per rank; the duplicate copies are collapsed] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.3, 'temperature_mlp': 0.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.3, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.3, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.3, + "temperature_mlp": 0.3, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test2-worker-0:554958:554958 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554958:554958 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554958:554958 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:554958:554958 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:554958:554958 [0] NCCL INFO NET/Plugin: Using internal network plugin. 
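The TinyLlavaConfig dump above records mask_type "soft" with temperature 0.3 for both the LLM and the connector, and the earlier "Pre-training init ... scores: Mean=1.000000" lines show one learnable scores tensor per masked projection. A common way to realize such a temperature-controlled soft mask is to gate each weight by sigmoid(scores / temperature); the sketch below illustrates that construction under that assumption and is not the repository's actual implementation.

```python
# Sketch of a temperature-controlled soft weight mask, consistent with the
# config above (mask_type="soft", temperature=0.3, scores initialized near
# 1.0). Illustrative only -- NOT the TinyLLaVA source.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    def __init__(self, in_features: int, out_features: int, temperature: float = 0.3):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5)
        # One learnable score per weight, mean 1.0 as in the init log lines.
        self.scores = nn.Parameter(torch.ones(out_features, in_features))
        self.temperature = temperature

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Lower temperature sharpens the sigmoid toward a hard 0/1 gate.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask)

layer = SoftMaskedLinear(896, 896)   # hidden_size from the config above
y = layer(torch.randn(2, 896))
print(layer.scores.mean())           # ~1.0, matching "Mean=1.000000"
```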
+ywang29-vrdb-test2-worker-0:554958:554958 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test2-worker-0:554964:554964 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:554964:554964 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554964:554964 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554964:554964 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:554964:554964 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:554964:554964 [6] NCCL INFO NET/Plugin: Using internal network plugin. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
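Each Python-level warning in this log (pynvml, `resume_download`, TypedStorage, Flash Attention) is printed once per rank, which is why an 8-GPU launch repeats every message up to eight times. One common mitigation, sketched below on the assumption that the launcher exports LOCAL_RANK (the deepspeed launcher does), is to silence warnings on non-zero ranks; this helper is illustrative, not part of TinyLLaVA.

```python
# Sketch: keep multi-rank logs readable by letting only rank 0 print
# Python warnings and INFO chatter. Illustrative helper, not TinyLLaVA code.
import logging
import os
import warnings

def quiet_nonzero_ranks() -> None:
    if int(os.environ.get("LOCAL_RANK", "0")) != 0:
        warnings.filterwarnings("ignore")            # FutureWarning/UserWarning spam
        logging.getLogger().setLevel(logging.ERROR)  # demote INFO-level chatter

quiet_nonzero_ranks()
```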
+ywang29-vrdb-test2-worker-0:554960:554960 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:554960:554960 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554960:554960 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554960:554960 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:554960:554960 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:554960:554960 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:554962:554962 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:554962:554962 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554962:554962 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554962:554962 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:554962:554962 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:554962:554962 [4] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:554965:554965 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:554965:554965 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554965:554965 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554965:554965 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:554965:554965 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:554965:554965 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:554963:554963 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:554963:554963 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554963:554963 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554963:554963 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:554963:554963 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:554963:554963 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO NET/IB : No device found. 
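The NCCL lines here record the interconnect discovery: no libnccl-net plugin and no InfiniBand device are found, so every rank falls back to the internal Socket transport over eth0. On a single node this mainly affects bootstrap traffic, since intra-node transfers typically go over P2P and shared memory. A quick pre-flight check of the same facts might look like the following sketch.

```python
# Pre-flight look at the NCCL setup this log reports (NCCL 2.21.5, socket
# transport over eth0). Illustrative; run in the same environment.
import os
import torch

print("torch CUDA available:", torch.cuda.is_available())
print("NCCL version:", ".".join(map(str, torch.cuda.nccl.version())))
print("NCCL_SOCKET_IFNAME:", os.environ.get("NCCL_SOCKET_IFNAME"))
print("visible devices:", os.environ.get("CUDA_VISIBLE_DEVICES"))
```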
+ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:554961:554961 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:554961:554961 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554961:554961 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554961:554961 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:554961:554961 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:554961:554961 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
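The repeated "Flash Attention 2.0 with a model not initialized on GPU" message is transformers noting that flash-attention kernels only run on CUDA. Under ZeRO-3 the weights are materialized on CPU first and placed by the trainer, so the warning is typically benign here. Outside DeepSpeed, the pattern the warning asks for looks like the sketch below (standard transformers API; the model name is taken from the launch command above).

```python
# Sketch of GPU placement right after loading with flash_attention_2, which
# silences the "not initialized on GPU" warning in a plain (non-DeepSpeed) run.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,              # flash-attn requires fp16/bf16
    attn_implementation="flash_attention_2",
)
model.to("cuda")  # move to GPU before any forward pass
```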
+ywang29-vrdb-test2-worker-0:554959:554959 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:554959:554959 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554959:554959 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554959:554959 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:554959:554959 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:554959:554959 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO ncclCommInitRank comm 0x564f1a287140 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc0d92278a25768b5 - Init START +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO ncclCommInitRank comm 0x55cc223b9300 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc0d92278a25768b5 - Init START +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO ncclCommInitRank comm 0x55b5ade19480 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc0d92278a25768b5 - Init START +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO ncclCommInitRank comm 0x555ceec467f0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc0d92278a25768b5 - Init START +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO ncclCommInitRank comm 0x55788f1441c0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc0d92278a25768b5 - Init START +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO ncclCommInitRank comm 0x56312df97930 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc0d92278a25768b5 - Init START +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO ncclCommInitRank comm 0x5612c16810f0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc0d92278a25768b5 - Init START +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO ncclCommInitRank comm 0x55f0c25191f0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc0d92278a25768b5 - Init START +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Setting affinity for GPU 0 to 
ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO comm 0x55f0c25191f0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO comm 0x55788f1441c0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO comm 0x5612c16810f0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO comm 0x564f1a287140 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO comm 0x55b5ade19480 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO comm 0x56312df97930 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO comm 0x555ceec467f0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO comm 0x55cc223b9300 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 
6 7 +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 
7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 
01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 03/0 : 6[6] -> 
7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO Connected all trees 
+ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:554964:556645 [6] NCCL INFO ncclCommInitRank comm 0x55cc223b9300 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc0d92278a25768b5 - Init COMPLETE +ywang29-vrdb-test2-worker-0:554963:556649 [5] NCCL INFO ncclCommInitRank comm 0x555ceec467f0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc0d92278a25768b5 - Init COMPLETE +ywang29-vrdb-test2-worker-0:554965:556648 [7] NCCL INFO ncclCommInitRank comm 0x55b5ade19480 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc0d92278a25768b5 - Init COMPLETE +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:554962:556647 [4] NCCL INFO ncclCommInitRank comm 0x55788f1441c0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc0d92278a25768b5 - Init COMPLETE +ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:554960:556646 [2] NCCL INFO ncclCommInitRank comm 0x55f0c25191f0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc0d92278a25768b5 - Init COMPLETE +ywang29-vrdb-test2-worker-0:554958:556644 [0] NCCL INFO ncclCommInitRank comm 0x56312df97930 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc0d92278a25768b5 - Init COMPLETE +ywang29-vrdb-test2-worker-0:554961:556653 [3] NCCL INFO ncclCommInitRank comm 0x5612c16810f0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc0d92278a25768b5 - Init COMPLETE +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:554959:556654 [1] NCCL INFO ncclCommInitRank comm 0x564f1a287140 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc0d92278a25768b5 - Init COMPLETE +[2025-10-10 06:03:24,739] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 
'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 
'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[the warning above and this note were printed verbatim by each of the 8 ranks; 7 repetitions omitted]
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[message repeated 8 times, once per rank; 7 repetitions omitted]
+[2025-10-10 06:03:26,491] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+[message repeated 8 times, once per rank; 7 repetitions omitted]
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+[message repeated 8 times, once per rank; 7 repetitions omitted]
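The "newly initialized" warning above is expected here rather than a sign of a broken checkpoint: every masked linear layer carries an extra per-weight `scores` tensor that does not exist in the pretrained checkpoint, so Transformers reports it as freshly initialized on every rank. As a minimal sketch (a hypothetical helper, not part of the training script), one could audit these tensors right after loading; the print format mirrors the `Pre-training init` lines further below:

```python
import torch

def audit_score_init(model: torch.nn.Module, expected_mean: float = 1.0) -> None:
    """Hypothetical check: every supermask `scores` tensor should start at its
    configured init mean (1.0 here, matching the Mean=1.000000 lines below)."""
    for name, param in model.named_parameters():
        if name.endswith(".scores"):
            mean = param.detach().float().mean().item()
            print(f"Pre-training init {name}: Mean={mean:.6f}")
            assert abs(mean - expected_mean) < 1e-4, f"unexpected init for {name}"
```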
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init 
language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000 +Pre-training 
init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000 +Pre-training init connector._connector.0.scores: Mean=1.000005 +Pre-training init connector._connector.2.scores: Mean=0.999970 +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module> + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' +[2025-10-10 06:03:29,009] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 554958 +[2025-10-10 06:03:29,011] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 554959 +[2025-10-10 06:03:29,224] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 554960 +[2025-10-10 06:03:29,225] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 554961 +[2025-10-10 06:03:29,519] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 554962 +[2025-10-10 06:03:29,519] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 554963 +[2025-10-10 06:03:29,520] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 554964 +[2025-10-10 06:03:29,521] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 554965 +[2025-10-10 06:03:29,522] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type',
'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060221.log +Timestamp: 2025-10-10 06:03:30 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060753.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060753.log new file mode 100644 index 0000000000000000000000000000000000000000..3a5c5a1e242c0faab130163d81604bb1ccebb06c --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060753.log @@ -0,0 +1,2322 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060753.log +Timestamp: 2025-10-10 06:07:53 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:07:55,955] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:07:58,680] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:07:58,682] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.3 --temperature_mlp_text 0.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.3 --temperature_mlp_vision 0.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:08:01,275] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:08:02,326] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:08:02,326] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:08:02,326] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:08:02,326] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:08:02,326] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:08:02,326] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:08:02,326] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:08:02,329] [INFO] [launch.py:253:main] process 564843 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:08:02,331] [INFO] 
[launch.py:253:main] process 564844 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:08:02,333] [INFO] [launch.py:253:main] process 564845 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:08:02,335] [INFO] [launch.py:253:main] process 564846 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:08:02,336] [INFO] [launch.py:253:main] process 564847 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:08:02,338] [INFO] [launch.py:253:main] process 564848 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:08:02,340] [INFO] [launch.py:253:main] process 564849 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:08:02,342] [INFO] [launch.py:253:main] process 564850 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.3', '--temperature_mlp_text', '0.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.3', '--temperature_mlp_vision', '0.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 06:08:09,115] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:08:09,363] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:08:09,430] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:08:09,431] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:08:09,432] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:08:09,433] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:08:09,483] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:08:09,491] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:08:09,527] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:08:09,763] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:08:09,831] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:08:09,833] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:08:09,833] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 06:08:09,835] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:08:09,837] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:08:09,881] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:08:09,896] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.3, 'temperature_mlp': 0.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.3, 'mask_type': 'soft', 'backward_type': 'normal'}}
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.3,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.3,
+    "temperature_mlp": 0.3,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:564843:564843 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564843:564843 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564843:564843 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:564843:564843 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:564843:564843 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:564843:564843 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test2-worker-0:564844:564844 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:564844:564844 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564844:564844 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564844:564844 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:564844:564844 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:564844:564844 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:564846:564846 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:564846:564846 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564846:564846 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564846:564846 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:564846:564846 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:564846:564846 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:564847:564847 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:564847:564847 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564847:564847 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564847:564847 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:564847:564847 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:564847:564847 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:564849:564849 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:564849:564849 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564849:564849 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564849:564849 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:564849:564849 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:564849:564849 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:564845:564845 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:564845:564845 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564845:564845 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564845:564845 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:564845:564845 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:564845:564845 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:564850:564850 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:564850:564850 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564850:564850 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564850:564850 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:564850:564850 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:564850:564850 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:564848:564848 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:564848:564848 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564848:564848 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:564848:564848 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:564848:564848 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:564848:564848 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO ncclCommInitRank comm 0x56144d05cbb0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x54f00b97c10fa224 - Init START +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO ncclCommInitRank comm 0x561cf2c48140 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x54f00b97c10fa224 - Init START +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO ncclCommInitRank comm 0x55a70d5298d0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x54f00b97c10fa224 - Init START +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO ncclCommInitRank comm 0x56262f37b330 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x54f00b97c10fa224 - Init START +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO ncclCommInitRank comm 0x5556b2b759f0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x54f00b97c10fa224 - Init START +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO ncclCommInitRank comm 0x562d22ba7880 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x54f00b97c10fa224 - Init START +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO ncclCommInitRank comm 0x557684623910 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x54f00b97c10fa224 - Init START +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO ncclCommInitRank comm 0x560216f66790 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x54f00b97c10fa224 - Init START +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO NVLS 
multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO comm 0x557684623910 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO comm 0x56144d05cbb0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO comm 0x56262f37b330 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO comm 0x561cf2c48140 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO comm 0x55a70d5298d0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO comm 0x562d22ba7880 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO comm 0x5556b2b759f0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO comm 0x560216f66790 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 
4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 14/24 : 0 1 
2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] 
NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 
08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 00/0 : 0[0] -> 
1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test2-worker-0:564847:566521 [4] NCCL INFO ncclCommInitRank comm 0x561cf2c48140 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x54f00b97c10fa224 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test2-worker-0:564850:566527 [7] NCCL INFO ncclCommInitRank comm 0x560216f66790 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x54f00b97c10fa224 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564848:566528 [5] NCCL INFO ncclCommInitRank comm 0x56144d05cbb0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x54f00b97c10fa224 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test2-worker-0:564846:566520 [3] NCCL INFO ncclCommInitRank comm 0x56262f37b330 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x54f00b97c10fa224 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564844:566519 [1] NCCL INFO ncclCommInitRank comm 0x562d22ba7880 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x54f00b97c10fa224 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564843:566518 [0] NCCL INFO ncclCommInitRank comm 0x5556b2b759f0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x54f00b97c10fa224 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test2-worker-0:564849:566522 [6] NCCL INFO ncclCommInitRank comm 0x557684623910 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x54f00b97c10fa224 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test2-worker-0:564845:566523 [2] NCCL INFO ncclCommInitRank comm 0x55a70d5298d0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x54f00b97c10fa224 - Init COMPLETE
+[2025-10-10 06:08:55,645] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores',
'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
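The warning above is Transformers' standard report for model parameters that have no counterpart in the loaded checkpoint: the pretrain-stage checkpoint was saved before the per-weight `scores` mask tensors existed, so every `*.scores` key comes up missing and is freshly initialized (each of the 8 ranks prints the same warning once). A minimal, self-contained reproduction of the mechanism — toy class and layer names, not the project's actual code:

```python
import torch
import torch.nn as nn

# A linear layer augmented with a per-weight `scores` tensor, analogous to
# the supermask layers in this run; the name `Proj` is illustrative only.
class Proj(nn.Linear):
    def __init__(self, d_in, d_out):
        super().__init__(d_in, d_out, bias=False)
        self.scores = nn.Parameter(torch.ones(d_out, d_in))

pretrained = nn.Linear(4, 4, bias=False)   # checkpoint saved without scores
masked = Proj(4, 4)
result = masked.load_state_dict(pretrained.state_dict(), strict=False)
print(result.missing_keys)  # ['scores'] -- the analogue of the long list above
```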
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 06:08:57,331] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
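The DeepSpeed line reports the ZeRO-3 partitioning totals: `num_params` counts parameter tensors (907 of them) and `num_elems` their combined element count. The same tallies can be computed for any module; a toy example (the real counts above come from the full assembled model, not this stand-in):

```python
import torch.nn as nn

toy = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8))
n_tensors = sum(1 for _ in toy.parameters())        # 4 (weight + bias per layer)
n_elems = sum(p.numel() for p in toy.parameters())  # 144
# For the model in this log the same sums give 907 tensors / ~1.42e9 elements.
```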
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
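Every attention and MLP projection in the language model, and both connector layers, is a `SupermaskLinearSparsity_SoftForward_Normal` rather than a plain `nn.Linear`. The log does not include that class's source; going only by its name (soft forward mask, "normal" backward pass) and the Mean=1.0 score initialization reported below, a minimal sketch of such a layer might look like the following — an illustration under those assumptions, not the project's actual implementation:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal.

    The frozen pretrained weight is modulated by a learned per-weight
    `scores` tensor pushed through a temperature-scaled sigmoid, so the
    forward pass uses a soft mask and gradients flow only to `scores`
    (a "normal" backward, i.e. no straight-through estimator).
    """
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):  # temperature value assumed
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One score per weight, initialised at `init_mean` (cf. the
        # "Pre-training init ... Mean=1.000000" lines in this log).
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.weight.requires_grad = False  # only the mask is tuned

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

For example, with scores at 1.0 and a temperature of 0.3 the initial soft mask is sigmoid(1.0/0.3) ≈ 0.97, so the masked network starts out behaving almost exactly like the pretrained one and training reshapes the masks from there.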
+Pre-training init language_model.model.layers.{0-23}.{self_attn.q|k|v|o_proj, mlp.gate|up|down_proj}.scores: Mean=1.000000 (all 168 tensors identical)
+Pre-training init connector._connector.0.scores: Mean=1.000005
+Pre-training init connector._connector.2.scores: Mean=0.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-10 06:09:17,228 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-10 06:09:17,235 | INFO: Trainable Parameters:
+language_model.model.layers.{0-23}.self_attn.q_proj.scores: 802816 parameters per layer
+language_model.model.layers.{0-23}.self_attn.k_proj.scores: 114688 parameters per layer
+language_model.model.layers.{0-23}.self_attn.v_proj.scores: 114688 parameters per layer
+language_model.model.layers.{0-23}.self_attn.o_proj.scores: 802816 parameters per layer
+language_model.model.layers.{0-23}.mlp.gate_proj.scores: 4358144 parameters per layer
+language_model.model.layers.{0-23}.mlp.up_proj.scores: 4358144 parameters per layer
+language_model.model.layers.{0-23}.mlp.down_proj.scores: 4358144 parameters per layer
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
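Two quick consistency checks on the numbers above: the trainable set is exactly the `scores` tensors, whose sizes follow from the projection shapes in the module dump (e.g. q_proj: 896 × 896 = 802816), and the sampled subset is the integer part of 10% of 665298. In plain Python:

```python
# Re-derive "Total Trainable Parameters: 359661568" from the shapes above.
attn = 2 * 896 * 896 + 2 * 128 * 896   # q/o_proj and k/v_proj scores
mlp = 3 * 896 * 4864                    # gate/up/down_proj scores
connector = 1152 * 896 + 896 * 896      # connector._connector.{0,2}.scores
assert 24 * (attn + mlp) + connector == 359_661_568

# And the data subset: 10% of 665298 examples, truncated to an int.
assert int(0.1 * 665298) == 66529
```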
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%|          | 0/520 [00:00<?, ?it/s]
+[NCCL INFO, per-rank topology setup on ywang29-vrdb-test2-worker-0: 24 channels over the 8-GPU ring 0 1 2 3 4 5 6 7, chain trees 0->1->...->7, P2P chunksize 524288, all peer links established via P2P/CUMEM/read]
+ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p 
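The condensed NCCL block above records a single-node, 8-GPU topology: one ring and one chain tree, each replicated across 24 channels and wired in both directions over P2P/CUMEM. As a quick sanity check, here is a minimal sketch that tallies those connection lines from a saved copy of the log; the file path and the script itself are illustrative, not part of this run:

```python
import re
from collections import Counter

# Tally "Channel NN/0 : a[a] -> b[b] via <transport>" lines from the raw log.
pat = re.compile(r"NCCL INFO Channel (\d+)/0 : (\d)\[\d\] -> (\d)\[\d\] via (\S+)")

links = Counter()
with open("logs_oct10/run.log") as f:  # hypothetical local path
    for line in f:
        m = pat.search(line)
        if m:
            ch, src, dst, transport = m.groups()
            links[(src, dst, transport)] += 1

# With 24 channels per direction, each adjacent GPU pair should appear 24 times.
for (src, dst, transport), n in sorted(links.items()):
    print(f"{src} -> {dst} via {transport}: {n} channels")
```

For this log, every forward edge (0->1 through 7->0) and every reverse edge should count 24, one per channel.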
+ywang29-vrdb-test2-worker-0:564847:571550 [4] NCCL INFO ncclCommInitRank comm 0x7f9c0c06b7a0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x71e36b1956f75fc8 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564849:571551 [6] NCCL INFO ncclCommInitRank comm 0x7f5c6406ba60 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x71e36b1956f75fc8 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564850:571552 [7] NCCL INFO ncclCommInitRank comm 0x7f68d806a810 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x71e36b1956f75fc8 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564843:571545 [0] NCCL INFO ncclCommInitRank comm 0x7fc04806b490 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x71e36b1956f75fc8 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564845:571547 [2] NCCL INFO ncclCommInitRank comm 0x7f423806abd0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x71e36b1956f75fc8 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564846:571546 [3] NCCL INFO ncclCommInitRank comm 0x7f67d406b730 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x71e36b1956f75fc8 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564848:571548 [5] NCCL INFO ncclCommInitRank comm 0x7fa01806b160 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x71e36b1956f75fc8 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:564844:571549 [1] NCCL INFO ncclCommInitRank comm 0x7fa46806af40 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x71e36b1956f75fc8 - Init COMPLETE
+1/520 [00:14<2:08:47, 14.89s/it] {'loss': 2.0585, 'grad_norm': 0.06665477170345743, 'learning_rate': 0.0125, 'epoch': 0.0}
+2/520 [00:18<1:12:32, 8.40s/it] {'loss': 2.063, 'grad_norm': 0.07200560709505595, 'learning_rate': 0.025, 'epoch': 0.0}
+3/520 [00:22<54:30, 6.33s/it] {'loss': 1.6705, 'grad_norm': 0.03593751719829739, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+4/520 [00:26<46:05, 5.36s/it] {'loss': 1.5828, 'grad_norm': 0.014324711445596872, 'learning_rate': 0.05, 'epoch': 0.01}
+5/520 [00:30<41:30, 4.84s/it] {'loss': 1.5893, 'grad_norm': 0.012690854874129284, 'learning_rate': 0.0625, 'epoch': 0.01}
+6/520 [00:34<38:47, 4.53s/it] {'loss': 1.4087, 'grad_norm': 0.007888325070839367, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+7/520 [00:38<36:49, 4.31s/it] {'loss': 1.4305, 'grad_norm': 0.009743777930700444, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+8/520 [00:42<37:06, 4.35s/it] {'loss': 1.4923, 'grad_norm': 0.01084122763471347, 'learning_rate': 0.1, 'epoch': 0.02}
+9/520 [00:47<37:29, 4.40s/it] {'loss': 1.5787, 'grad_norm': 0.010404463595404131, 'learning_rate': 0.1125, 'epoch': 0.02}
+10/520 [00:50<35:43, 4.20s/it] {'loss': 1.402, 'grad_norm': 0.010028060948292542, 'learning_rate': 0.125, 'epoch': 0.02}
+11/520 [00:54<34:43, 4.09s/it] {'loss': 1.4995, 'grad_norm': 0.008251151692235743, 'learning_rate': 0.1375, 'epoch': 0.02}
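The learning_rate column in these step logs follows the configured schedule (--learning_rate 2e-1, --warmup_ratio 0.03, --lr_scheduler_type cosine, 520 total steps): linear warmup to 0.2 by step 16, then cosine decay. A minimal sketch that reproduces the logged values, assuming the ceil-rounded warmup-step convention used by transformers' cosine schedule:

```python
import math

# Cosine-with-warmup schedule matching the logged learning rates.
# Constants are read off this log: peak lr 2e-1, 520 steps, warmup_ratio 0.03.
PEAK_LR = 2e-1
TOTAL_STEPS = 520
WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)  # = 16

def lr_at(step: int) -> float:
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS  # linear warmup
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

print(lr_at(1))   # 0.0125, as logged at step 1
print(lr_at(16))  # 0.2, the warmup peak logged at step 16
print(lr_at(17))  # ~0.1999980572931538, as logged at step 17
```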
+12/520 [00:58<33:39, 3.98s/it] {'loss': 1.4404, 'grad_norm': 0.008324047322294506, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-10 06:10:24,528] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+13/520 [01:02<34:40, 4.10s/it] {'loss': 1.4717, 'grad_norm': 0.008685487868107859, 'learning_rate': 0.1625, 'epoch': 0.03}
+14/520 [01:06<33:33, 3.98s/it] {'loss': 1.6006, 'grad_norm': 0.011011690990020683, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+15/520 [01:10<32:41, 3.88s/it] {'loss': 1.5846, 'grad_norm': 0.009909978863371314, 'learning_rate': 0.1875, 'epoch': 0.03}
+16/520 [01:13<32:04, 3.82s/it] {'loss': 1.5782, 'grad_norm': 0.015178850006117585, 'learning_rate': 0.2, 'epoch': 0.03}
+17/520 [01:17<31:40, 3.78s/it] {'loss': 1.7496, 'grad_norm': 0.010888839317339267, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+18/520 [01:21<31:21, 3.75s/it] {'loss': 1.6088, 'grad_norm': 0.01358810079420564, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+19/520 [01:24<31:10, 3.73s/it] {'loss': 1.7929, 'grad_norm': 0.018145210547312517, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+20/520 [01:28<30:53, 3.71s/it] {'loss': 1.8776, 'grad_norm': 0.03654435332967568, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+21/520 [01:32<30:53, 3.71s/it] {'loss': 1.9294, 'grad_norm': 0.025846520083595947, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+22/520 [01:35<30:45, 3.71s/it] {'loss': 2.0346, 'grad_norm': 0.06239399151628125, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+23/520 [01:39<30:38, 3.70s/it] {'loss': 1.8984, 'grad_norm': 0.01722764492556324, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+24/520 [01:43<30:29, 3.69s/it] {'loss': 1.896, 'grad_norm': 0.016495187668248975, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+25/520 [01:47<30:23, 3.68s/it] {'loss': 1.8653, 'grad_norm': 0.014707109461866059, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+26/520 [01:50<30:22, 3.69s/it] {'loss': 1.9632, 'grad_norm': 0.018923843681410736, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+27/520 [01:54<30:18, 3.69s/it] {'loss': 2.0854, 'grad_norm': 0.09517390492080355, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+28/520 [01:58<30:13, 3.69s/it] {'loss': 2.2092, 'grad_norm': 0.03859682288794566, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
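The stage3 warning above fires when the CUDA caching allocator is flushed mid-step under memory pressure; the message itself suggests calling get_accelerator().empty_cache() at a fixed point in the loop so that all ranks flush together. A hedged sketch of that mitigation; train_loader, model_engine, and the flush interval are placeholders, not taken from this run:

```python
from deepspeed.accelerator import get_accelerator

# Standard DeepSpeed engine loop with a synchronized, periodic cache flush,
# as suggested by the stage3 warning in this log.
for step, batch in enumerate(train_loader):   # train_loader: placeholder
    loss = model_engine(batch)                # model_engine: placeholder
    model_engine.backward(loss)
    model_engine.step()
    if step % 50 == 0:  # flush interval is a tunable assumption
        get_accelerator().empty_cache()       # all ranks flush at the same point
```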
+29/520 [02:01<30:12, 3.69s/it] {'loss': 2.2105, 'grad_norm': 0.044744813476157726, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+30/520 [02:05<30:14, 3.70s/it] {'loss': 2.9213, 'grad_norm': 0.06271850198480428, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+31/520 [02:09<30:05, 3.69s/it] {'loss': 2.0246, 'grad_norm': 0.025550614930626124, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+32/520 [02:12<29:57, 3.68s/it] {'loss': 3.0519, 'grad_norm': 0.04271763625407127, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+33/520 [02:16<29:53, 3.68s/it] {'loss': 2.1024, 'grad_norm': 0.026927016465796715, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+34/520 [02:20<29:48, 3.68s/it] {'loss': 2.0059, 'grad_norm': 0.03413636123815877, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+35/520 [02:23<29:48, 3.69s/it] {'loss': 2.06, 'grad_norm': 0.017976466349279848, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+36/520 [02:27<29:46, 3.69s/it] {'loss': 2.0966, 'grad_norm': 0.012956763713797729, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+37/520 [02:31<29:44, 3.69s/it] {'loss': 2.4648, 'grad_norm': 0.017694513596964768, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+38/520 [02:35<29:46, 3.71s/it] {'loss': 2.1285, 'grad_norm': 0.012139724885177571, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+39/520 [02:38<29:42, 3.70s/it] {'loss': 1.8856, 'grad_norm': 0.009812792566997583, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+40/520 [02:42<29:33, 3.69s/it] {'loss': 1.9195, 'grad_norm': 0.01350225746033211, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+41/520 [02:46<29:36, 3.71s/it] {'loss': 1.8824, 'grad_norm': 0.015342219663648408, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+42/520 [02:49<29:32, 3.71s/it] {'loss': 1.894, 'grad_norm': 0.00990135707046395, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+43/520 [02:53<29:28, 3.71s/it] {'loss': 2.2667, 'grad_norm': 0.013986158210743303, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+44/520 [02:57<29:21, 3.70s/it] {'loss': 2.3584, 'grad_norm': 0.01287772212302402, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+45/520 [03:00<29:15, 3.70s/it] {'loss': 1.8604, 'grad_norm': 0.00891405056457498, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+46/520 [03:04<29:08, 3.69s/it] {'loss': 2.4392, 'grad_norm': 0.023182300056106588, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+47/520 [03:08<29:02, 3.68s/it] {'loss': 1.8579, 'grad_norm': 0.007487073822678914, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+48/520 [03:12<29:05, 3.70s/it] {'loss': 1.7982, 'grad_norm': 0.007059572698311626, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
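Each step line embeds a Python-literal metrics dict, which makes the log easy to post-process. A small sketch, with a hypothetical file path, that extracts the dicts for plotting loss or learning-rate curves:

```python
import ast
import re

# Pull every "{'loss': ...}" metrics dict out of a saved copy of this log.
metric_pat = re.compile(r"\{'loss':[^}]*\}")

records = []
with open("logs_oct10/run.log") as f:  # hypothetical local path
    for line in f:
        for m in metric_pat.finditer(line):
            records.append(ast.literal_eval(m.group()))

losses = [r["loss"] for r in records]
print(len(records), "steps parsed; first loss:", losses[0])
```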
+49/520 [03:15<29:00, 3.70s/it] {'loss': 1.7834, 'grad_norm': 0.006378226074770033, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+50/520 [03:19<28:55, 3.69s/it] {'loss': 1.8025, 'grad_norm': 0.006845647614348182, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+51/520 [03:23<28:46, 3.68s/it] {'loss': 1.728, 'grad_norm': 0.011522895160554311, 'learning_rate': 0.19762960071199334, 'epoch': 0.1}
+52/520 [03:26<28:39, 3.67s/it] {'loss': 1.9185, 'grad_norm': 0.01017548338335874, 'learning_rate': 0.19749279121818236, 'epoch': 0.1}
+53/520 [03:30<28:30, 3.66s/it] {'loss': 1.8687, 'grad_norm': 0.006297532401602511, 'learning_rate': 0.19735219372611235, 'epoch': 0.1}
+54/520 [03:34<28:30, 3.67s/it] {'loss': 1.7223, 'grad_norm': 0.006950532760722357, 'learning_rate': 0.19720781369857746, 'epoch': 0.1}
+55/520 [03:37<28:29, 3.68s/it] {'loss': 1.7126, 'grad_norm': 0.006448122705281381, 'learning_rate': 0.1970596567453391, 'epoch': 0.11}
+56/520 [03:41<28:28, 3.68s/it] {'loss': 1.8631, 'grad_norm': 0.006043940730945355, 'learning_rate': 0.1969077286229078, 'epoch': 0.11}
+57/520 [03:45<28:27, 3.69s/it] {'loss': 1.6865, 'grad_norm': 0.006239036859496, 'learning_rate': 0.19675203523431964, 'epoch': 0.11}
+58/520 [03:48<28:21, 3.68s/it] {'loss': 1.8435, 'grad_norm': 0.005348471331631467, 'learning_rate': 0.19659258262890683, 'epoch': 0.11}
+59/520 [03:52<28:18, 3.68s/it] {'loss': 2.0398, 'grad_norm': 0.013807987005191657, 'learning_rate': 0.19642937700206278, 'epoch': 0.11}
+60/520 [03:56<28:15, 3.69s/it] {'loss': 1.7494, 'grad_norm': 0.00558080837580595, 'learning_rate': 0.19626242469500121, 'epoch': 0.12}
+61/520 [03:59<28:12, 3.69s/it] {'loss': 2.1634, 'grad_norm': 0.009880139502380226, 'learning_rate': 0.19609173219450998, 'epoch': 0.12}
+62/520 [04:03<28:03, 3.68s/it] {'loss': 1.6915, 'grad_norm': 0.005234010269347911, 'learning_rate': 0.19591730613269878, 'epoch': 0.12}
+63/520 [04:07<28:00, 3.68s/it] {'loss': 1.7139, 'grad_norm': 0.005128275320758463, 'learning_rate': 0.19573915328674182, 'epoch': 0.12}
+64/520 [04:10<27:54, 3.67s/it] {'loss': 1.713, 'grad_norm': 0.005017568062194214, 'learning_rate': 0.1955572805786141, 'epoch': 0.12}
+65/520 [04:14<28:00, 3.69s/it] {'loss': 1.7354, 'grad_norm': 0.005592446249607664, 'learning_rate': 0.1953716950748227, 'epoch': 0.12}
+66/520 [04:18<27:56, 3.69s/it] {'loss': 1.7069, 'grad_norm': 0.004970597652683242, 'learning_rate': 0.19518240398613226, 'epoch': 0.13}
+67/520 [04:21<27:56, 3.70s/it] {'loss': 1.5206, 'grad_norm': 0.004633262235157754, 'learning_rate': 0.1949894146672846, 'epoch': 0.13}
+68/520 [04:25<27:48, 3.69s/it] {'loss': 1.5657, 'grad_norm': 0.004616099625166046, 'learning_rate': 0.1947927346167132, 'epoch': 0.13}
+69/520 [04:29<27:44, 3.69s/it] {'loss': 1.5308, 'grad_norm': 0.005372930176540968, 'learning_rate': 0.1945923714762516, 'epoch': 0.13}
+70/520 [04:33<27:41, 3.69s/it] {'loss': 1.6168, 'grad_norm': 0.005193746320579325, 'learning_rate': 0.19438833303083677, 'epoch': 0.13}
+71/520 [04:36<27:43, 3.70s/it] {'loss': 1.5012, 'grad_norm': 0.004780649351828219, 'learning_rate': 0.19418062720820636, 'epoch': 0.14}
+72/520 [04:40<27:32, 3.69s/it] {'loss': 1.6705, 'grad_norm': 0.004712404851547564, 'learning_rate': 0.19396926207859086, 'epoch': 0.14}
+73/520 [04:44<27:26, 3.68s/it] {'loss': 1.4881, 'grad_norm': 0.00441749254062193, 'learning_rate': 0.19375424585439993, 'epoch': 0.14}
+74/520 [04:47<27:22, 3.68s/it] {'loss': 1.6047, 'grad_norm': 0.004785457517171715, 'learning_rate': 0.1935355868899034, 'epoch': 0.14}
+75/520 [04:51<27:13, 3.67s/it] {'loss': 1.4868, 'grad_norm': 0.004315643232689028, 'learning_rate': 0.19331329368090666, 'epoch': 0.14}
+76/520 [04:55<27:17, 3.69s/it] {'loss': 2.0872, 'grad_norm': 0.010375275125447628, 'learning_rate': 0.19308737486442043, 'epoch': 0.15}
+77/520 [04:58<27:20, 3.70s/it] {'loss': 1.4223, 'grad_norm': 0.005016577192521093, 'learning_rate': 0.19285783921832536, 'epoch': 0.15}
+78/520 [05:02<27:21, 3.71s/it] {'loss': 1.5694, 'grad_norm': 0.004225040111882008, 'learning_rate': 0.19262469566103088, 'epoch': 0.15}
+79/520 [05:06<27:16, 3.71s/it] {'loss': 1.5421, 'grad_norm': 0.004011187283636123, 'learning_rate': 0.19238795325112867, 'epoch': 0.15}
+80/520 [05:09<27:07, 3.70s/it] {'loss': 2.0565, 'grad_norm': 0.00575694789300315, 'learning_rate': 0.19214762118704076, 'epoch': 0.15}
+81/520 [05:13<27:00, 3.69s/it] {'loss': 1.7149, 'grad_norm': 0.004900652952670354, 'learning_rate': 0.19190370880666208, 'epoch': 0.16}
+82/520 [05:17<26:53, 3.68s/it] {'loss': 1.6266, 'grad_norm': 0.0038316528484613113, 'learning_rate': 0.19165622558699763, 'epoch': 0.16}
+83/520 [05:21<26:47, 3.68s/it] {'loss': 1.6621, 'grad_norm': 0.004169705449338549, 'learning_rate': 0.19140518114379435, 'epoch': 0.16}
+84/520 [05:24<26:47, 3.69s/it] {'loss': 1.6306, 'grad_norm': 0.004201819313381892, 'learning_rate': 0.19115058523116735, 'epoch': 0.16}
+85/520 [05:28<26:43, 3.69s/it] {'loss': 1.6231, 'grad_norm': 0.0038897557483361742, 'learning_rate': 0.1908924477412211, 'epoch': 0.16}
+86/520 [05:32<26:53, 3.72s/it] {'loss': 1.6898, 'grad_norm': 0.004029150804347731, 'learning_rate': 0.19063077870366502, 'epoch': 0.17}
+87/520 [05:36<27:04, 3.75s/it] {'loss': 1.9484, 'grad_norm': 0.005335986139575631, 'learning_rate': 0.1903655882854237, 'epoch': 0.17}
+88/520 [05:39<27:10, 3.77s/it] {'loss': 2.0939, 'grad_norm': 0.005402085658103505, 'learning_rate': 0.19009688679024192, 'epoch': 0.17}
+89/520 [05:43<27:21, 3.81s/it] {'loss': 1.5849, 'grad_norm': 0.0038325386578172606, 'learning_rate': 0.18982468465828442, 'epoch': 0.17}
+90/520 [05:47<27:38, 3.86s/it] {'loss': 1.5048, 'grad_norm': 0.0035703320971876397, 'learning_rate': 0.1895489924657301, 'epoch': 0.17}
+91/520 [05:51<27:38, 3.87s/it] {'loss': 1.6176, 'grad_norm': 0.0034919354310952912, 'learning_rate': 0.18926982092436118, 'epoch': 0.17}
+92/520 [05:55<27:12, 3.81s/it] {'loss': 1.5446, 'grad_norm': 0.0038002130545673547, 'learning_rate': 0.18898718088114688, 'epoch': 0.18}
+93/520 [05:58<26:51, 3.77s/it] {'loss': 1.528, 'grad_norm': 0.003862012619498791, 'learning_rate': 0.18870108331782218, 'epoch': 0.18}
+94/520 [06:02<26:40, 3.76s/it] {'loss': 1.6637, 'grad_norm': 0.003842378764077065, 'learning_rate': 0.18841153935046098, 'epoch': 0.18}
+95/520 [06:06<26:31, 3.74s/it] {'loss': 1.5069, 'grad_norm': 0.004114592136239951, 'learning_rate': 0.18811856022904425, 'epoch': 0.18}
+96/520 [06:10<26:16, 3.72s/it] {'loss': 1.5111, 'grad_norm': 0.003250942419363189, 'learning_rate': 0.18782215733702287, 'epoch': 0.18}
+97/520 [06:13<26:15, 3.72s/it] {'loss': 1.477, 'grad_norm': 0.004038455177660126, 'learning_rate': 0.18752234219087538, 'epoch': 0.19}
+98/520 [06:17<26:05, 3.71s/it] {'loss': 1.4799, 'grad_norm': 0.0030487164862276037, 'learning_rate': 0.18721912643966054, 'epoch': 0.19}
+99/520 [06:21<26:01, 3.71s/it] {'loss': 1.5171, 'grad_norm': 0.00385269142908837, 'learning_rate': 0.18691252186456464, 'epoch': 0.19}
+100/520 [06:24<25:53, 3.70s/it] {'loss': 1.7415, 'grad_norm': 0.004115906013657031, 'learning_rate': 0.1866025403784439, 'epoch': 0.19}
+101/520 [06:28<25:46, 3.69s/it] {'loss': 1.4858, 'grad_norm': 0.0033785417812729973, 'learning_rate': 0.18628919402536132, 'epoch': 0.19}
+102/520 [06:32<25:43, 3.69s/it] {'loss': 1.5041, 'grad_norm': 0.003674318246588289, 'learning_rate': 0.18597249498011903, 'epoch': 0.2}
+103/520 [06:35<25:39, 3.69s/it] {'loss': 1.4272, 'grad_norm': 0.0030451095684369796, 'learning_rate': 0.18565245554778517, 'epoch': 0.2}
+104/520 [06:39<25:35, 3.69s/it] {'loss': 1.5123, 'grad_norm': 0.0036169254573245237, 'learning_rate': 0.18532908816321558, 'epoch': 0.2}
+105/520 [06:43<25:33, 3.69s/it] {'loss': 1.5171, 'grad_norm': 0.0031549736485149784, 'learning_rate': 0.18500240539057092, 'epoch': 0.2}
'grad_norm': 0.0031549736485149784, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:43<25:33, 3.69s/it] 20%|██ | 106/520 [06:47<25:46, 3.74s/it] {'loss': 1.7329, 'grad_norm': 0.0037058519326590767, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:47<25:46, 3.74s/it] 21%|██ | 107/520 [06:50<25:40, 3.73s/it] {'loss': 1.718, 'grad_norm': 0.004003959797044997, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:50<25:40, 3.73s/it] 21%|██ | 108/520 [06:54<25:33, 3.72s/it] {'loss': 1.4532, 'grad_norm': 0.0033388242297271303, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:54<25:33, 3.72s/it] 21%|██ | 109/520 [06:58<25:27, 3.72s/it] {'loss': 1.6964, 'grad_norm': 0.0036252422952125773, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:58<25:27, 3.72s/it] 21%|██ | 110/520 [07:01<25:18, 3.70s/it] {'loss': 1.6385, 'grad_norm': 0.0035512557660116483, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:01<25:18, 3.70s/it] 21%|██▏ | 111/520 [07:05<25:17, 3.71s/it] {'loss': 1.6545, 'grad_norm': 0.0035004108199644442, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:05<25:17, 3.71s/it] 22%|██▏ | 112/520 [07:09<25:08, 3.70s/it] {'loss': 1.5278, 'grad_norm': 0.0032803729247794225, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:09<25:08, 3.70s/it] 22%|██▏ | 113/520 [07:13<25:04, 3.70s/it] {'loss': 1.3776, 'grad_norm': 0.003073072631949505, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:13<25:04, 3.70s/it] 22%|██▏ | 114/520 [07:16<25:03, 3.70s/it] {'loss': 1.4929, 'grad_norm': 0.003131301360526723, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:16<25:03, 3.70s/it] 22%|██▏ | 115/520 [07:20<25:00, 3.70s/it] {'loss': 1.6469, 'grad_norm': 0.003249284340008841, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:20<25:00, 3.70s/it] 22%|██▏ | 116/520 [07:24<24:51, 3.69s/it] {'loss': 1.6058, 'grad_norm': 0.003134084746140434, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:24<24:51, 3.69s/it] 22%|██▎ | 117/520 [07:27<24:53, 3.71s/it] {'loss': 1.6003, 'grad_norm': 0.003232252119770946, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:27<24:53, 3.71s/it] 23%|██▎ | 118/520 [07:31<24:44, 3.69s/it] {'loss': 1.4563, 'grad_norm': 0.0030262280887343404, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:31<24:44, 3.69s/it] 23%|██▎ | 119/520 [07:35<24:44, 3.70s/it] {'loss': 1.4146, 'grad_norm': 0.0032421797647162287, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:35<24:44, 3.70s/it] 23%|██▎ | 120/520 [07:38<24:39, 3.70s/it] {'loss': 1.4385, 'grad_norm': 0.003597471534977189, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:38<24:39, 3.70s/it] 23%|██▎ | 121/520 [07:42<24:31, 3.69s/it] {'loss': 1.4958, 'grad_norm': 0.0032199790165562455, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:42<24:31, 3.69s/it] 23%|██▎ | 122/520 [07:46<24:41, 3.72s/it] {'loss': 1.3721, 'grad_norm': 0.002881226604838901, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:46<24:41, 3.72s/it] 24%|██▎ | 123/520 [07:50<24:57, 3.77s/it] {'loss': 1.7579, 'grad_norm': 0.004198037599939604, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:50<24:57, 
3.77s/it] 24%|██▍ | 124/520 [07:54<25:11, 3.82s/it] {'loss': 1.4657, 'grad_norm': 0.0033470768642883406, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:54<25:11, 3.82s/it] 24%|██▍ | 125/520 [07:58<25:21, 3.85s/it] {'loss': 1.4459, 'grad_norm': 0.003013435359258871, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:58<25:21, 3.85s/it] 24%|██▍ | 126/520 [08:02<26:47, 4.08s/it] {'loss': 1.6274, 'grad_norm': 0.0029070600299157395, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:02<26:47, 4.08s/it] 24%|██▍ | 127/520 [08:06<26:28, 4.04s/it] {'loss': 1.4186, 'grad_norm': 0.003219985721461309, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:06<26:28, 4.04s/it] 25%|██▍ | 128/520 [08:10<26:08, 4.00s/it] {'loss': 1.4806, 'grad_norm': 0.0030154283952380464, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:10<26:08, 4.00s/it] 25%|██▍ | 129/520 [08:14<25:56, 3.98s/it] {'loss': 1.393, 'grad_norm': 0.0025274002875878836, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:14<25:56, 3.98s/it] 25%|██▌ | 130/520 [08:18<25:44, 3.96s/it] {'loss': 1.4819, 'grad_norm': 0.002588252022277323, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:18<25:44, 3.96s/it] 25%|██▌ | 131/520 [08:22<25:36, 3.95s/it] {'loss': 1.6099, 'grad_norm': 0.0031507597016639777, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:22<25:36, 3.95s/it] 25%|██▌ | 132/520 [08:26<25:27, 3.94s/it] {'loss': 1.5226, 'grad_norm': 0.0031552248476840977, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:26<25:27, 3.94s/it] 26%|██▌ | 133/520 [08:30<25:18, 3.92s/it] {'loss': 1.415, 'grad_norm': 0.003254970572109641, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:30<25:18, 3.92s/it] 26%|██▌ | 134/520 [08:34<25:11, 3.92s/it] {'loss': 1.5025, 'grad_norm': 0.0028209811853911657, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:34<25:11, 3.92s/it] 26%|██▌ | 135/520 [08:37<25:03, 3.91s/it] {'loss': 1.5793, 'grad_norm': 0.0029502289178954095, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:37<25:03, 3.91s/it] 26%|██▌ | 136/520 [08:41<24:55, 3.90s/it] {'loss': 1.4922, 'grad_norm': 0.002832415077015808, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:41<24:55, 3.90s/it] 26%|██▋ | 137/520 [08:45<24:56, 3.91s/it] {'loss': 1.3983, 'grad_norm': 0.0032617204518527233, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:45<24:56, 3.91s/it] 27%|██▋ | 138/520 [08:49<24:52, 3.91s/it] {'loss': 1.4257, 'grad_norm': 0.002698263760915433, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:49<24:52, 3.91s/it] 27%|██▋ | 139/520 [08:53<24:50, 3.91s/it] {'loss': 1.4976, 'grad_norm': 0.003198576607870124, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:53<24:50, 3.91s/it] 27%|██▋ | 140/520 [08:57<24:46, 3.91s/it] {'loss': 1.6379, 'grad_norm': 0.0038214423466528024, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:57<24:46, 3.91s/it] 27%|██▋ | 141/520 [09:01<24:41, 3.91s/it] {'loss': 1.556, 'grad_norm': 0.002707094453685787, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:01<24:41, 3.91s/it] 27%|██▋ | 142/520 [09:05<24:34, 3.90s/it] {'loss': 1.6891, 'grad_norm': 0.0028855921427888716, 
'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:05<24:34, 3.90s/it] 28%|██▊ | 143/520 [09:09<24:32, 3.90s/it] {'loss': 1.4677, 'grad_norm': 0.0030427441588094266, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:09<24:32, 3.90s/it] 28%|██▊ | 144/520 [09:13<24:23, 3.89s/it] {'loss': 1.3879, 'grad_norm': 0.002863971059077961, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:13<24:23, 3.89s/it] 28%|██▊ | 145/520 [09:16<24:03, 3.85s/it] {'loss': 1.3311, 'grad_norm': 0.002510995494371998, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:16<24:03, 3.85s/it] 28%|██▊ | 146/520 [09:20<23:41, 3.80s/it] {'loss': 1.7259, 'grad_norm': 0.0031368264346967804, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:20<23:41, 3.80s/it] 28%|██▊ | 147/520 [09:24<23:31, 3.78s/it] {'loss': 1.3815, 'grad_norm': 0.0027228447411790893, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:24<23:31, 3.78s/it] 28%|██▊ | 148/520 [09:27<23:20, 3.76s/it] {'loss': 1.4011, 'grad_norm': 0.0026429318460405513, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:27<23:20, 3.76s/it] 29%|██▊ | 149/520 [09:31<23:10, 3.75s/it] {'loss': 1.3687, 'grad_norm': 0.0027599745965732787, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:31<23:10, 3.75s/it] 29%|██▉ | 150/520 [09:35<23:03, 3.74s/it] {'loss': 1.5907, 'grad_norm': 0.0027322514837130794, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:35<23:03, 3.74s/it] 29%|██▉ | 151/520 [09:39<22:54, 3.72s/it] {'loss': 1.389, 'grad_norm': 0.0026900192773374387, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:39<22:54, 3.72s/it] 29%|██▉ | 152/520 [09:42<22:46, 3.71s/it] {'loss': 1.3586, 'grad_norm': 0.0028679500540299383, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:42<22:46, 3.71s/it] 29%|██▉ | 153/520 [09:46<22:49, 3.73s/it] {'loss': 1.4053, 'grad_norm': 0.002687488149491116, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:46<22:49, 3.73s/it] 30%|██▉ | 154/520 [09:50<22:38, 3.71s/it] {'loss': 1.4948, 'grad_norm': 0.0025952676351226646, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:50<22:38, 3.71s/it] 30%|██▉ | 155/520 [09:53<22:34, 3.71s/it] {'loss': 1.3794, 'grad_norm': 0.0028068171318180258, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:53<22:34, 3.71s/it] 30%|███ | 156/520 [09:57<22:31, 3.71s/it] {'loss': 1.4288, 'grad_norm': 0.002856232809584872, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:57<22:31, 3.71s/it] 30%|███ | 157/520 [10:01<22:27, 3.71s/it] {'loss': 1.7407, 'grad_norm': 0.0030005337530081056, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [10:01<22:27, 3.71s/it] 30%|███ | 158/520 [10:05<22:18, 3.70s/it] {'loss': 1.3941, 'grad_norm': 0.0026472203227360705, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:05<22:18, 3.70s/it] 31%|███ | 159/520 [10:08<22:13, 3.69s/it] {'loss': 1.4228, 'grad_norm': 0.0026374228662356565, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:08<22:13, 3.69s/it] 31%|███ | 160/520 [10:12<22:11, 3.70s/it] {'loss': 1.4666, 'grad_norm': 0.002767313722823068, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:12<22:11, 3.70s/it] 31%|███ | 161/520 
[10:16<22:06, 3.69s/it] {'loss': 1.4431, 'grad_norm': 0.0026050311194018056, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:16<22:06, 3.69s/it] 31%|███ | 162/520 [10:19<22:00, 3.69s/it] {'loss': 1.6233, 'grad_norm': 0.002809964496367264, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:19<22:00, 3.69s/it] 31%|███▏ | 163/520 [10:23<21:58, 3.69s/it] {'loss': 1.3205, 'grad_norm': 0.0029478385268647904, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:23<21:58, 3.69s/it] 32%|███▏ | 164/520 [10:27<21:53, 3.69s/it] {'loss': 1.2748, 'grad_norm': 0.002597123285795327, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:27<21:53, 3.69s/it] 32%|███▏ | 165/520 [10:30<21:48, 3.68s/it] {'loss': 1.4132, 'grad_norm': 0.0024054569777431445, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:30<21:48, 3.68s/it] 32%|███▏ | 166/520 [10:34<21:43, 3.68s/it] {'loss': 1.4111, 'grad_norm': 0.002733332726116742, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:34<21:43, 3.68s/it] 32%|███▏ | 167/520 [10:38<21:43, 3.69s/it] {'loss': 1.3969, 'grad_norm': 0.0026111605767915823, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:38<21:43, 3.69s/it] 32%|███▏ | 168/520 [10:41<21:35, 3.68s/it] {'loss': 1.3381, 'grad_norm': 0.0025531693354885615, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:41<21:35, 3.68s/it] 32%|███▎ | 169/520 [10:45<21:30, 3.68s/it] {'loss': 1.4275, 'grad_norm': 0.002467273628833724, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:45<21:30, 3.68s/it] 33%|███▎ | 170/520 [10:49<21:26, 3.67s/it] {'loss': 1.5331, 'grad_norm': 0.0024674605356450876, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:49<21:26, 3.67s/it] 33%|███▎ | 171/520 [10:52<21:20, 3.67s/it] {'loss': 1.3517, 'grad_norm': 0.002746670335436564, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:52<21:20, 3.67s/it] 33%|███▎ | 172/520 [10:56<21:17, 3.67s/it] {'loss': 1.4262, 'grad_norm': 0.002634552970442075, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:56<21:17, 3.67s/it] 33%|███▎ | 173/520 [11:00<21:12, 3.67s/it] {'loss': 1.3468, 'grad_norm': 0.0024334665265256443, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:00<21:12, 3.67s/it] 33%|███▎ | 174/520 [11:03<21:11, 3.68s/it] {'loss': 1.432, 'grad_norm': 0.0027069818759190636, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:03<21:11, 3.68s/it] 34%|███▎ | 175/520 [11:07<21:13, 3.69s/it] {'loss': 1.3394, 'grad_norm': 0.0024324638155664956, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:07<21:13, 3.69s/it] 34%|███▍ | 176/520 [11:11<21:22, 3.73s/it] {'loss': 1.6313, 'grad_norm': 0.002804870920878398, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:11<21:22, 3.73s/it] 34%|███▍ | 177/520 [11:15<21:27, 3.75s/it] {'loss': 1.4919, 'grad_norm': 0.0025230548116800078, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:15<21:27, 3.75s/it] 34%|███▍ | 178/520 [11:19<21:30, 3.77s/it] {'loss': 1.4046, 'grad_norm': 0.0026835879926812244, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:19<21:30, 3.77s/it] 34%|███▍ | 179/520 [11:22<21:31, 3.79s/it] {'loss': 1.497, 'grad_norm': 0.0023766034040002353, 
'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:22<21:31, 3.79s/it] 35%|███▍ | 180/520 [11:26<21:35, 3.81s/it] {'loss': 1.3839, 'grad_norm': 0.0025174245221836443, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:26<21:35, 3.81s/it] 35%|███▍ | 181/520 [11:30<21:35, 3.82s/it] {'loss': 1.3897, 'grad_norm': 0.0021462381612879808, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:30<21:35, 3.82s/it] 35%|███▌ | 182/520 [11:34<21:34, 3.83s/it] {'loss': 1.3812, 'grad_norm': 0.0024844784466954567, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:34<21:34, 3.83s/it] 35%|███▌ | 183/520 [11:38<21:33, 3.84s/it] {'loss': 1.4186, 'grad_norm': 0.0024302227530880907, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:38<21:33, 3.84s/it] 35%|███▌ | 184/520 [11:42<21:29, 3.84s/it] {'loss': 1.3093, 'grad_norm': 0.002694124731352718, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:42<21:29, 3.84s/it] 36%|███▌ | 185/520 [11:45<21:26, 3.84s/it] {'loss': 1.5051, 'grad_norm': 0.0023618743827087878, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:45<21:26, 3.84s/it] 36%|███▌ | 186/520 [11:49<21:26, 3.85s/it] {'loss': 1.329, 'grad_norm': 0.002359079259068166, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:49<21:26, 3.85s/it] 36%|███▌ | 187/520 [11:53<21:08, 3.81s/it] {'loss': 1.339, 'grad_norm': 0.002678406457895704, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:53<21:08, 3.81s/it] 36%|███▌ | 188/520 [11:57<20:53, 3.78s/it] {'loss': 1.4354, 'grad_norm': 0.002651125249042874, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:57<20:53, 3.78s/it] 36%|███▋ | 189/520 [12:01<20:57, 3.80s/it] {'loss': 1.4489, 'grad_norm': 0.002358477150384864, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:01<20:57, 3.80s/it] 37%|███▋ | 190/520 [12:04<20:58, 3.81s/it] {'loss': 1.358, 'grad_norm': 0.0026032851875990664, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:04<20:58, 3.81s/it] 37%|███▋ | 191/520 [12:08<20:56, 3.82s/it] {'loss': 1.3176, 'grad_norm': 0.002341629556241027, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:08<20:56, 3.82s/it] 37%|███▋ | 192/520 [12:12<20:52, 3.82s/it] {'loss': 1.408, 'grad_norm': 0.0022762032428921804, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:12<20:52, 3.82s/it] 37%|███▋ | 193/520 [12:16<20:35, 3.78s/it] {'loss': 1.5612, 'grad_norm': 0.002824128388681578, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:16<20:35, 3.78s/it] 37%|███▋ | 194/520 [12:20<20:23, 3.75s/it] {'loss': 1.429, 'grad_norm': 0.0023647978314375612, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:20<20:23, 3.75s/it] 38%|███▊ | 195/520 [12:23<20:16, 3.74s/it] {'loss': 1.4141, 'grad_norm': 0.002251374859215613, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:23<20:16, 3.74s/it] 38%|███▊ | 196/520 [12:27<20:02, 3.71s/it] {'loss': 1.3882, 'grad_norm': 0.0027313775033425804, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:27<20:02, 3.71s/it] 38%|███▊ | 197/520 [12:31<19:54, 3.70s/it] {'loss': 1.3407, 'grad_norm': 0.0023480134561330206, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 
[12:31<19:54, 3.70s/it] 38%|███▊ | 198/520 [12:34<19:49, 3.69s/it] {'loss': 1.4238, 'grad_norm': 0.0025922833507941536, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:34<19:49, 3.69s/it] 38%|███▊ | 199/520 [12:38<19:44, 3.69s/it] {'loss': 1.3407, 'grad_norm': 0.0024840861178487728, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:38<19:44, 3.69s/it] 38%|███▊ | 200/520 [12:42<19:41, 3.69s/it] {'loss': 1.4961, 'grad_norm': 0.0026427388790248767, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:42<19:41, 3.69s/it] 39%|███▊ | 201/520 [12:45<19:37, 3.69s/it] {'loss': 1.4992, 'grad_norm': 0.0022512448250699237, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:45<19:37, 3.69s/it] 39%|███▉ | 202/520 [12:49<19:32, 3.69s/it] {'loss': 1.3183, 'grad_norm': 0.00233277528914413, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:49<19:32, 3.69s/it] 39%|███▉ | 203/520 [12:53<19:26, 3.68s/it] {'loss': 1.3847, 'grad_norm': 0.002538312533377703, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:53<19:26, 3.68s/it] 39%|███▉ | 204/520 [12:56<19:20, 3.67s/it] {'loss': 1.4157, 'grad_norm': 0.0025966705041099956, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:56<19:20, 3.67s/it] 39%|███▉ | 205/520 [13:00<19:19, 3.68s/it] {'loss': 1.5026, 'grad_norm': 0.0024263915385801657, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:00<19:19, 3.68s/it] 40%|███▉ | 206/520 [13:04<19:17, 3.68s/it] {'loss': 1.4543, 'grad_norm': 0.002433794612633515, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:04<19:17, 3.68s/it] 40%|███▉ | 207/520 [13:07<19:16, 3.69s/it] {'loss': 1.5018, 'grad_norm': 0.0025158100497287023, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:07<19:16, 3.69s/it] 40%|████ | 208/520 [13:11<19:11, 3.69s/it] {'loss': 1.4076, 'grad_norm': 0.002606922229952272, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:11<19:11, 3.69s/it] 40%|████ | 209/520 [13:15<19:11, 3.70s/it] {'loss': 1.3549, 'grad_norm': 0.0023238447824521983, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:15<19:11, 3.70s/it] 40%|████ | 210/520 [13:19<19:12, 3.72s/it] {'loss': 1.4078, 'grad_norm': 0.002412077291985409, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:19<19:12, 3.72s/it] 41%|████ | 211/520 [13:22<19:07, 3.71s/it] {'loss': 1.4377, 'grad_norm': 0.002198071572887643, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:22<19:07, 3.71s/it] 41%|████ | 212/520 [13:26<18:58, 3.70s/it] {'loss': 1.4048, 'grad_norm': 0.0024632220131805306, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:26<18:58, 3.70s/it] 41%|████ | 213/520 [13:30<18:56, 3.70s/it] {'loss': 1.3609, 'grad_norm': 0.0026600898027459153, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:30<18:56, 3.70s/it] 41%|████ | 214/520 [13:33<18:48, 3.69s/it] {'loss': 1.3577, 'grad_norm': 0.002436091107730072, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:33<18:48, 3.69s/it] 41%|████▏ | 215/520 [13:37<18:49, 3.70s/it] {'loss': 1.4017, 'grad_norm': 0.0024186817430858023, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:37<18:49, 3.70s/it] 42%|████▏ | 216/520 [13:41<18:44, 3.70s/it] {'loss': 
1.2675, 'grad_norm': 0.0024193582112262027, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:41<18:44, 3.70s/it] 42%|████▏ | 217/520 [13:44<18:41, 3.70s/it] {'loss': 1.391, 'grad_norm': 0.002408508689884152, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:44<18:41, 3.70s/it] 42%|████▏ | 218/520 [13:48<18:33, 3.69s/it] {'loss': 1.3972, 'grad_norm': 0.002594758612111533, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:48<18:33, 3.69s/it] 42%|████▏ | 219/520 [13:52<18:31, 3.69s/it] {'loss': 1.3611, 'grad_norm': 0.0022209990848360583, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:52<18:31, 3.69s/it] 42%|████▏ | 220/520 [13:55<18:28, 3.70s/it] {'loss': 1.4748, 'grad_norm': 0.0026632663256857944, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:55<18:28, 3.70s/it] 42%|████▎ | 221/520 [13:59<18:27, 3.70s/it] {'loss': 1.3915, 'grad_norm': 0.002368720271491164, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:59<18:27, 3.70s/it] 43%|████▎ | 222/520 [14:03<18:23, 3.70s/it] {'loss': 1.2953, 'grad_norm': 0.0022546610474742305, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:03<18:23, 3.70s/it] 43%|████▎ | 223/520 [14:07<18:27, 3.73s/it] {'loss': 1.2956, 'grad_norm': 0.00230075654538936, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:07<18:27, 3.73s/it] 43%|████▎ | 224/520 [14:11<18:36, 3.77s/it] {'loss': 1.7553, 'grad_norm': 0.0027153288096924007, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:11<18:36, 3.77s/it] 43%|████▎ | 225/520 [14:15<18:54, 3.85s/it] {'loss': 1.3154, 'grad_norm': 0.002536566869885576, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:15<18:54, 3.85s/it] 43%|████▎ | 226/520 [14:19<19:10, 3.91s/it] {'loss': 1.4124, 'grad_norm': 0.002117402687500955, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:19<19:10, 3.91s/it] 44%|████▎ | 227/520 [14:23<19:04, 3.91s/it] {'loss': 1.4098, 'grad_norm': 0.002180325498391077, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:23<19:04, 3.91s/it] 44%|████▍ | 228/520 [14:26<18:57, 3.90s/it] {'loss': 1.6481, 'grad_norm': 0.002407482849742319, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:26<18:57, 3.90s/it] 44%|████▍ | 229/520 [14:30<18:53, 3.89s/it] {'loss': 1.3887, 'grad_norm': 0.0020548708267009593, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:30<18:53, 3.89s/it] 44%|████▍ | 230/520 [14:34<18:58, 3.93s/it] {'loss': 1.2495, 'grad_norm': 0.0024483571351078294, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:34<18:58, 3.93s/it] 44%|████▍ | 231/520 [14:38<18:43, 3.89s/it] {'loss': 1.3244, 'grad_norm': 0.0022752376837121384, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:38<18:43, 3.89s/it] 45%|████▍ | 232/520 [14:42<18:38, 3.88s/it] {'loss': 1.6581, 'grad_norm': 0.002608076518070533, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:42<18:38, 3.88s/it] 45%|████▍ | 233/520 [14:46<18:34, 3.88s/it] {'loss': 1.5215, 'grad_norm': 0.0024357739131567408, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:46<18:34, 3.88s/it] 45%|████▌ | 234/520 [14:50<18:30, 3.88s/it] {'loss': 1.2747, 'grad_norm': 
0.002438164127912936, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:50<18:30, 3.88s/it] 45%|████▌ | 235/520 [14:54<18:27, 3.89s/it] {'loss': 1.3122, 'grad_norm': 0.0022080429937269865, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:54<18:27, 3.89s/it] 45%|████▌ | 236/520 [14:58<18:25, 3.89s/it] {'loss': 1.4516, 'grad_norm': 0.0021236062509363027, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:58<18:25, 3.89s/it] 46%|████▌ | 237/520 [15:01<18:18, 3.88s/it] {'loss': 1.399, 'grad_norm': 0.0022637308349702312, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:01<18:18, 3.88s/it] 46%|████▌ | 238/520 [15:05<18:16, 3.89s/it] {'loss': 1.356, 'grad_norm': 0.0023806550438715194, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:05<18:16, 3.89s/it] 46%|████▌ | 239/520 [15:09<18:11, 3.89s/it] {'loss': 1.4567, 'grad_norm': 0.0023019987824998214, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:09<18:11, 3.89s/it] 46%|████▌ | 240/520 [15:13<18:09, 3.89s/it] {'loss': 1.2041, 'grad_norm': 0.0020144804917014265, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:13<18:09, 3.89s/it] 46%|████▋ | 241/520 [15:17<18:07, 3.90s/it] {'loss': 1.2819, 'grad_norm': 0.00209047721255362, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:17<18:07, 3.90s/it] 47%|████▋ | 242/520 [15:21<18:02, 3.89s/it] {'loss': 1.3156, 'grad_norm': 0.0020661505801556463, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:21<18:02, 3.89s/it] 47%|████▋ | 243/520 [15:25<17:58, 3.89s/it] {'loss': 1.3111, 'grad_norm': 0.002192028177563367, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:25<17:58, 3.89s/it] 47%|████▋ | 244/520 [15:29<17:52, 3.89s/it] {'loss': 1.4538, 'grad_norm': 0.0024179552734917243, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:29<17:52, 3.89s/it] 47%|████▋ | 245/520 [15:33<17:50, 3.89s/it] {'loss': 1.2969, 'grad_norm': 0.0021819685177746975, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:33<17:50, 3.89s/it] 47%|████▋ | 246/520 [15:36<17:49, 3.90s/it] {'loss': 1.6264, 'grad_norm': 0.0023713038554027564, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:36<17:49, 3.90s/it] 48%|████▊ | 247/520 [15:40<17:42, 3.89s/it] {'loss': 1.4714, 'grad_norm': 0.002355351394977268, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:40<17:42, 3.89s/it] 48%|████▊ | 248/520 [15:44<17:32, 3.87s/it] {'loss': 1.2854, 'grad_norm': 0.002132474167042144, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:44<17:32, 3.87s/it] 48%|████▊ | 249/520 [15:48<17:13, 3.81s/it] {'loss': 1.4104, 'grad_norm': 0.0024330459552165664, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:48<17:13, 3.81s/it] 48%|████▊ | 250/520 [15:52<17:00, 3.78s/it] {'loss': 1.3582, 'grad_norm': 0.0024499434501198945, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:52<17:00, 3.78s/it] 48%|████▊ | 251/520 [15:55<16:52, 3.76s/it] {'loss': 1.4055, 'grad_norm': 0.0020924022570994543, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:55<16:52, 3.76s/it] 48%|████▊ | 252/520 [15:59<16:45, 3.75s/it] {'loss': 1.4994, 'grad_norm': 0.0022919122742844278, 
'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:59<16:45, 3.75s/it] 49%|████▊ | 253/520 [16:03<16:40, 3.75s/it] {'loss': 1.401, 'grad_norm': 0.002285956998134899, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:03<16:40, 3.75s/it] 49%|████▉ | 254/520 [16:06<16:32, 3.73s/it] {'loss': 1.309, 'grad_norm': 0.0021858067561158125, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:06<16:32, 3.73s/it] 49%|████▉ | 255/520 [16:10<16:25, 3.72s/it] {'loss': 1.3311, 'grad_norm': 0.0022807768890493753, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:10<16:25, 3.72s/it] 49%|████▉ | 256/520 [16:14<16:19, 3.71s/it] {'loss': 1.3811, 'grad_norm': 0.0023011839974152073, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:14<16:19, 3.71s/it] 49%|████▉ | 257/520 [16:17<16:09, 3.69s/it] {'loss': 1.3909, 'grad_norm': 0.0023774716702161954, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:17<16:09, 3.69s/it] 50%|████▉ | 258/520 [16:21<16:05, 3.69s/it] {'loss': 1.4057, 'grad_norm': 0.002034744493308234, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:21<16:05, 3.69s/it] 50%|████▉ | 259/520 [16:25<15:59, 3.67s/it] {'loss': 1.4584, 'grad_norm': 0.0025595549555463997, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:25<15:59, 3.67s/it] 50%|█████ | 260/520 [16:28<15:55, 3.68s/it] {'loss': 1.6093, 'grad_norm': 0.0022007905744282305, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:28<15:55, 3.68s/it] 50%|█████ | 261/520 [16:32<15:50, 3.67s/it] {'loss': 1.5147, 'grad_norm': 0.002284514640722148, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:32<15:50, 3.67s/it] 50%|█████ | 262/520 [16:36<15:48, 3.68s/it] {'loss': 1.2963, 'grad_norm': 0.002157208178209148, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:36<15:48, 3.68s/it] 51%|█████ | 263/520 [16:39<15:41, 3.67s/it] {'loss': 1.5381, 'grad_norm': 0.002455550235641449, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:39<15:41, 3.67s/it] 51%|█████ | 264/520 [16:43<15:40, 3.67s/it] {'loss': 1.4233, 'grad_norm': 0.0020788956674696064, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:43<15:40, 3.67s/it] 51%|█████ | 265/520 [16:47<15:37, 3.67s/it] {'loss': 1.3045, 'grad_norm': 0.002350313527860506, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:47<15:37, 3.67s/it] 51%|█████ | 266/520 [16:50<15:31, 3.67s/it] {'loss': 1.1602, 'grad_norm': 0.001878368330817411, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:50<15:31, 3.67s/it] 51%|█████▏ | 267/520 [16:54<15:30, 3.68s/it] {'loss': 1.3163, 'grad_norm': 0.0019949771091719707, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:54<15:30, 3.68s/it] 52%|█████▏ | 268/520 [16:58<15:27, 3.68s/it] {'loss': 1.6529, 'grad_norm': 0.002485035548435498, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:58<15:27, 3.68s/it] 52%|█████▏ | 269/520 [17:02<15:23, 3.68s/it] {'loss': 1.4122, 'grad_norm': 0.002327156212818721, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:02<15:23, 3.68s/it] 52%|█████▏ | 270/520 [17:05<15:23, 3.69s/it] {'loss': 1.4472, 'grad_norm': 0.0023250660153601413, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} 
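
The `learning_rate` values in these records trace a cosine decay from the peak `2e-1` set in the launch command, with `--warmup_ratio 0.03` over the run's 520 optimizer steps: step 268 above logs exactly `0.1`, the cosine midpoint. A minimal sketch, assuming the stock `transformers` cosine-with-warmup formula and ceil-rounded warmup steps (both assumptions, since the schedule code itself is not in this log):

```python
import math

# Reconstructing the logged learning_rate curve (assumed: stock
# cosine-with-warmup schedule, peak 2e-1, 520 total optimizer steps,
# warmup_ratio 0.03 -> ceil(0.03 * 520) = 16 warmup steps).
PEAK_LR, TOTAL_STEPS = 2e-1, 520
WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)  # 16

def lr_at(step: int) -> float:
    if step < WARMUP_STEPS:
        return PEAK_LR * step / max(1, WARMUP_STEPS)  # linear warmup
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(268))  # ~0.1 -- matches the step-268 record above
print(lr_at(79))   # ~0.1923879532511287 -- matches the step-79 record above
```

The stray float tails in some logged values (e.g. `0.050000000000000024` near step 352 below) are consistent with this closed form evaluated in double precision.
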
+ 52%|█████▏ | 270/520 [17:05<15:23, 3.69s/it] 52%|█████▏ | 271/520 [17:09<15:23, 3.71s/it] {'loss': 1.4146, 'grad_norm': 0.0024367054882648317, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:09<15:23, 3.71s/it] 52%|█████▏ | 272/520 [17:13<15:23, 3.72s/it] {'loss': 1.4576, 'grad_norm': 0.002234836128240299, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:13<15:23, 3.72s/it] 52%|█████▎ | 273/520 [17:16<15:18, 3.72s/it] {'loss': 1.5888, 'grad_norm': 0.002879107390611079, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:16<15:18, 3.72s/it] 53%|█████▎ | 274/520 [17:20<15:09, 3.70s/it] {'loss': 1.3584, 'grad_norm': 0.0022643176297366847, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:20<15:09, 3.70s/it] 53%|█████▎ | 275/520 [17:24<15:05, 3.70s/it] {'loss': 1.3067, 'grad_norm': 0.0022142611073352634, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:24<15:05, 3.70s/it] 53%|█████▎ | 276/520 [17:27<15:00, 3.69s/it] {'loss': 1.3969, 'grad_norm': 0.0024960371348280988, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:27<15:00, 3.69s/it] 53%|█████▎ | 277/520 [17:31<14:58, 3.70s/it] {'loss': 1.5621, 'grad_norm': 0.002106955151603864, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:31<14:58, 3.70s/it] 53%|█████▎ | 278/520 [17:35<14:56, 3.71s/it] {'loss': 1.2644, 'grad_norm': 0.001977485641817482, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:35<14:56, 3.71s/it] 54%|█████▎ | 279/520 [17:39<14:52, 3.70s/it] {'loss': 1.4784, 'grad_norm': 0.002326767273756956, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:39<14:52, 3.70s/it] 54%|█████▍ | 280/520 [17:42<14:46, 3.70s/it] {'loss': 1.3041, 'grad_norm': 0.0023224746593221222, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:42<14:46, 3.70s/it] 54%|█████▍ | 281/520 [17:46<14:41, 3.69s/it] {'loss': 1.427, 'grad_norm': 0.0022595140450641974, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:46<14:41, 3.69s/it] 54%|█████▍ | 282/520 [17:50<14:39, 3.69s/it] {'loss': 1.2757, 'grad_norm': 0.0020177343883045515, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:50<14:39, 3.69s/it] 54%|█████▍ | 283/520 [17:53<14:36, 3.70s/it] {'loss': 1.4691, 'grad_norm': 0.002313875957646976, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:53<14:36, 3.70s/it] 55%|█████▍ | 284/520 [17:57<14:32, 3.70s/it] {'loss': 1.4248, 'grad_norm': 0.0024263476708133195, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:57<14:32, 3.70s/it] 55%|█████▍ | 285/520 [18:01<14:31, 3.71s/it] {'loss': 1.2956, 'grad_norm': 0.002135881299069223, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:01<14:31, 3.71s/it] 55%|█████▌ | 286/520 [18:05<14:42, 3.77s/it] {'loss': 1.1677, 'grad_norm': 0.002047041409198335, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:05<14:42, 3.77s/it] 55%|█████▌ | 287/520 [18:09<14:47, 3.81s/it] {'loss': 1.4211, 'grad_norm': 0.00218239563037067, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:09<14:47, 3.81s/it] 55%|█████▌ | 288/520 [18:13<14:54, 3.86s/it] {'loss': 1.4686, 'grad_norm': 0.0020272876916880944, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 
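
Each step's metrics are emitted as a Python-dict literal between tqdm frames, so the loss and learning-rate curves can be recovered from this file with a few lines. A hypothetical post-hoc helper (not part of the training code; the path is a placeholder):

```python
import ast
import re

# Pull the per-step metric dicts out of a raw log like this one,
# skipping the tqdm progress-bar frames that repeat around each record.
RECORD = re.compile(r"\{'loss':[^{}]*\}")

def parse_records(path: str) -> list[dict]:
    with open(path, encoding="utf-8", errors="replace") as fh:
        return [ast.literal_eval(m) for m in RECORD.findall(fh.read())]

# records = parse_records(".../qwen2.5-0_5b_base_masktune_..._20251010_055757.log")
# losses = [r["loss"] for r in records]   # 1.5657, 1.5308, ... as above
```

Each dict appears once per step even though the surrounding bar is re-rendered twice, so no deduplication is needed.
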
55%|█████▌ | 288/520 [18:13<14:54, 3.86s/it] 56%|█████▌ | 289/520 [18:16<14:53, 3.87s/it] {'loss': 1.3157, 'grad_norm': 0.002050772602940309, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:16<14:53, 3.87s/it] 56%|█████▌ | 290/520 [18:20<14:50, 3.87s/it] {'loss': 1.2295, 'grad_norm': 0.0019362812504523795, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:20<14:50, 3.87s/it] 56%|█████▌ | 291/520 [18:24<14:48, 3.88s/it] {'loss': 1.2865, 'grad_norm': 0.0021808470185991535, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:24<14:48, 3.88s/it] 56%|█████▌ | 292/520 [18:28<14:43, 3.87s/it] {'loss': 1.3351, 'grad_norm': 0.0020735910462269086, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:28<14:43, 3.87s/it] 56%|█████▋ | 293/520 [18:32<14:39, 3.87s/it] {'loss': 1.2758, 'grad_norm': 0.002245150860720256, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:32<14:39, 3.87s/it] 57%|█████▋ | 294/520 [18:36<14:38, 3.89s/it] {'loss': 1.3128, 'grad_norm': 0.0022427888698947854, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:36<14:38, 3.89s/it] 57%|█████▋ | 295/520 [18:40<14:34, 3.89s/it] {'loss': 1.5338, 'grad_norm': 0.0023197204962123216, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:40<14:34, 3.89s/it] 57%|█████▋ | 296/520 [18:44<14:30, 3.89s/it] {'loss': 1.2529, 'grad_norm': 0.0022087188088594895, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:44<14:30, 3.89s/it] 57%|█████▋ | 297/520 [18:48<14:26, 3.89s/it] {'loss': 1.3956, 'grad_norm': 0.0023520843855477027, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:48<14:26, 3.89s/it] 57%|█████▋ | 298/520 [18:51<14:20, 3.87s/it] {'loss': 1.3698, 'grad_norm': 0.002052496013583131, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:51<14:20, 3.87s/it] 57%|█████▊ | 299/520 [18:55<14:18, 3.89s/it] {'loss': 1.5261, 'grad_norm': 0.0022060417558786337, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:55<14:18, 3.89s/it] 58%|█████▊ | 300/520 [18:59<14:13, 3.88s/it] {'loss': 1.4174, 'grad_norm': 0.0022551729773339895, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:59<14:13, 3.88s/it] 58%|█████▊ | 301/520 [19:03<14:13, 3.90s/it] {'loss': 1.3947, 'grad_norm': 0.0021797030194413195, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:03<14:13, 3.90s/it] 58%|█████▊ | 302/520 [19:07<14:11, 3.91s/it] {'loss': 1.5451, 'grad_norm': 0.0021511983111765247, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:07<14:11, 3.91s/it] 58%|█████▊ | 303/520 [19:11<14:08, 3.91s/it] {'loss': 1.3295, 'grad_norm': 0.002413314542517407, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:11<14:08, 3.91s/it] 58%|█████▊ | 304/520 [19:15<14:07, 3.92s/it] {'loss': 1.4346, 'grad_norm': 0.002155337808662819, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:15<14:07, 3.92s/it] 59%|█████▊ | 305/520 [19:19<14:01, 3.91s/it] {'loss': 1.452, 'grad_norm': 0.0026931079792994273, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:19<14:01, 3.91s/it] 59%|█████▉ | 306/520 [19:23<13:57, 3.92s/it] {'loss': 1.3616, 'grad_norm': 0.0021600551414005715, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} 
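
The 520-step total and the logged `epoch` field both follow from the launch configuration: 8 ranks x per-device batch 4 x gradient accumulation 4 gives an effective batch of 128, over one epoch of a 0.1 sample of the mix. A back-of-envelope check, assuming ~665,298 samples (inferred from the `llava_v1_5_mix665k` filename, so treat the count as an assumption):

```python
import math

# Why the bar counts to 520 (sample count inferred from the dataset name).
samples = int(665_298 * 0.1)      # --train_data_ratio 0.1  -> 66_529
effective_batch = 8 * 4 * 4       # ranks x per-device batch x grad-accum
print(math.ceil(samples / effective_batch))  # 520 optimizer steps

print(round(306 / 520, 2))  # 0.59 -- the 'epoch' field logged at step 306 above
```
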
+ 59%|█████▉ | 306/520 [19:23<13:57, 3.92s/it] 59%|█████▉ | 307/520 [19:27<13:53, 3.91s/it] {'loss': 1.3156, 'grad_norm': 0.002071563993298826, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:27<13:53, 3.91s/it] 59%|█████▉ | 308/520 [19:31<13:46, 3.90s/it] {'loss': 1.4327, 'grad_norm': 0.0021187123707292047, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:31<13:46, 3.90s/it] 59%|█████▉ | 309/520 [19:35<14:06, 4.01s/it] {'loss': 1.3013, 'grad_norm': 0.0020643275535989973, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:35<14:06, 4.01s/it] 60%|█████▉ | 310/520 [19:39<13:56, 3.98s/it] {'loss': 1.279, 'grad_norm': 0.002183685269349483, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:39<13:56, 3.98s/it] 60%|█████▉ | 311/520 [19:43<13:48, 3.96s/it] {'loss': 1.2464, 'grad_norm': 0.0020516646982761816, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:43<13:48, 3.96s/it] 60%|██████ | 312/520 [19:47<13:44, 3.96s/it] {'loss': 1.245, 'grad_norm': 0.002235897128507709, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:47<13:44, 3.96s/it] 60%|██████ | 313/520 [19:50<13:30, 3.92s/it] {'loss': 1.2405, 'grad_norm': 0.0019557348659115514, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:50<13:30, 3.92s/it] 60%|██████ | 314/520 [19:55<13:42, 3.99s/it] {'loss': 1.2652, 'grad_norm': 0.002031521618353235, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:55<13:42, 3.99s/it] 61%|██████ | 315/520 [19:58<13:21, 3.91s/it] {'loss': 1.5519, 'grad_norm': 0.0024456868190029985, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:58<13:21, 3.91s/it] 61%|██████ | 316/520 [20:02<13:25, 3.95s/it] {'loss': 1.2344, 'grad_norm': 0.002238384454904101, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [20:02<13:25, 3.95s/it] 61%|██████ | 317/520 [20:06<13:07, 3.88s/it] {'loss': 1.2597, 'grad_norm': 0.0019168540182151234, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [20:06<13:07, 3.88s/it] 61%|██████ | 318/520 [20:10<12:51, 3.82s/it] {'loss': 1.407, 'grad_norm': 0.002289883772966617, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:10<12:51, 3.82s/it] 61%|██████▏ | 319/520 [20:14<13:02, 3.89s/it] {'loss': 1.2473, 'grad_norm': 0.002044537891803064, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:14<13:02, 3.89s/it] 62%|██████▏ | 320/520 [20:17<12:46, 3.83s/it] {'loss': 1.1892, 'grad_norm': 0.001976148652063698, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:17<12:46, 3.83s/it] 62%|██████▏ | 321/520 [20:21<12:35, 3.79s/it] {'loss': 1.3999, 'grad_norm': 0.002213186248139938, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:21<12:35, 3.79s/it] 62%|██████▏ | 322/520 [20:25<12:26, 3.77s/it] {'loss': 1.3628, 'grad_norm': 0.002131121483293507, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:25<12:26, 3.77s/it] 62%|██████▏ | 323/520 [20:29<12:21, 3.76s/it] {'loss': 1.4506, 'grad_norm': 0.0022906957373330984, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:29<12:21, 3.76s/it] 62%|██████▏ | 324/520 [20:32<12:13, 3.74s/it] {'loss': 1.3414, 'grad_norm': 0.0020721227700451505, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} 
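
Just before step 348 below, transformers warns `Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048)`: one conversation tokenizes past `--model_max_length`. The run continues, which suggests the preprocessing truncates over-long samples; a generic sketch of that kind of guard (illustrative only, not this repo's exact preprocessing):

```python
from transformers import AutoTokenizer

# Encode with an explicit cap so over-long conversations are truncated
# instead of reaching the model at full length (hypothetical snippet).
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", use_fast=False)
ids = tok("some very long conversation ...",
          truncation=True, max_length=2048).input_ids
assert len(ids) <= 2048  # safe to feed to the model

```
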
+ 62%|██████▏ | 324/520 [20:32<12:13, 3.74s/it] 62%|██████▎ | 325/520 [20:36<12:05, 3.72s/it] {'loss': 1.3412, 'grad_norm': 0.002308519249770243, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:36<12:05, 3.72s/it] 63%|██████▎ | 326/520 [20:40<12:00, 3.71s/it] {'loss': 1.3287, 'grad_norm': 0.0022917684076614025, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:40<12:00, 3.71s/it] 63%|██████▎ | 327/520 [20:43<11:57, 3.72s/it] {'loss': 1.532, 'grad_norm': 0.002415100169518157, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:43<11:57, 3.72s/it] 63%|██████▎ | 328/520 [20:47<11:50, 3.70s/it] {'loss': 1.4118, 'grad_norm': 0.0023119077822611914, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:47<11:50, 3.70s/it] 63%|██████▎ | 329/520 [20:51<11:46, 3.70s/it] {'loss': 1.2468, 'grad_norm': 0.0018851378677157747, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:51<11:46, 3.70s/it] 63%|██████▎ | 330/520 [20:55<11:43, 3.70s/it] {'loss': 1.3253, 'grad_norm': 0.0020233271592246527, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:55<11:43, 3.70s/it] 64%|██████▎ | 331/520 [20:58<11:39, 3.70s/it] {'loss': 1.2913, 'grad_norm': 0.002193065005312778, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:58<11:39, 3.70s/it] 64%|██████▍ | 332/520 [21:02<11:37, 3.71s/it] {'loss': 1.5374, 'grad_norm': 0.002270620821225134, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:02<11:37, 3.71s/it] 64%|██████▍ | 333/520 [21:06<11:33, 3.71s/it] {'loss': 1.4677, 'grad_norm': 0.0023019751618711423, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:06<11:33, 3.71s/it] 64%|██████▍ | 334/520 [21:09<11:29, 3.71s/it] {'loss': 1.343, 'grad_norm': 0.0022259454680732867, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:09<11:29, 3.71s/it] 64%|██████▍ | 335/520 [21:13<11:23, 3.70s/it] {'loss': 1.3381, 'grad_norm': 0.0021278743451279966, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:13<11:23, 3.70s/it] 65%|██████▍ | 336/520 [21:17<11:21, 3.71s/it] {'loss': 1.2276, 'grad_norm': 0.002276994808251424, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:17<11:21, 3.71s/it] 65%|██████▍ | 337/520 [21:20<11:17, 3.70s/it] {'loss': 1.2313, 'grad_norm': 0.002309133724320415, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:20<11:17, 3.70s/it] 65%|██████▌ | 338/520 [21:24<11:14, 3.70s/it] {'loss': 1.3585, 'grad_norm': 0.0022673063607988172, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:24<11:14, 3.70s/it] 65%|██████▌ | 339/520 [21:28<11:11, 3.71s/it] {'loss': 1.2971, 'grad_norm': 0.002035330786104286, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:28<11:11, 3.71s/it] 65%|██████▌ | 340/520 [21:32<11:20, 3.78s/it] {'loss': 1.27, 'grad_norm': 0.002039528697649076, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:32<11:20, 3.78s/it] 66%|██████▌ | 341/520 [21:36<11:22, 3.81s/it] {'loss': 1.3012, 'grad_norm': 0.0022287920474363754, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:36<11:22, 3.81s/it] 66%|██████▌ | 342/520 [21:40<11:28, 3.87s/it] {'loss': 1.5099, 'grad_norm': 0.002746188438425344, 'learning_rate': 
0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:40<11:28, 3.87s/it] 66%|██████▌ | 343/520 [21:44<11:25, 3.87s/it] {'loss': 1.4764, 'grad_norm': 0.0020790651686089976, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:44<11:25, 3.87s/it] 66%|██████▌ | 344/520 [21:47<11:21, 3.87s/it] {'loss': 1.2451, 'grad_norm': 0.0019139210497391172, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:47<11:21, 3.87s/it] 66%|██████▋ | 345/520 [21:51<11:16, 3.87s/it] {'loss': 1.3722, 'grad_norm': 0.002202836211920092, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:51<11:16, 3.87s/it] 67%|██████▋ | 346/520 [21:55<11:23, 3.93s/it] {'loss': 1.4525, 'grad_norm': 0.0022201233816997097, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:55<11:23, 3.93s/it] 67%|██████▋ | 347/520 [21:59<11:27, 3.98s/it] {'loss': 1.2609, 'grad_norm': 0.001977587604474117, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:59<11:27, 3.98s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:03<11:16, 3.93s/it] {'loss': 1.2321, 'grad_norm': 0.0023127055562145176, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:03<11:16, 3.93s/it] 67%|██████▋ | 349/520 [22:07<11:08, 3.91s/it] {'loss': 1.276, 'grad_norm': 0.0020940332271745113, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:07<11:08, 3.91s/it] 67%|██████▋ | 350/520 [22:11<10:59, 3.88s/it] {'loss': 1.3028, 'grad_norm': 0.0021211965284969724, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:11<10:59, 3.88s/it] 68%|██████▊ | 351/520 [22:15<10:53, 3.87s/it] {'loss': 1.2111, 'grad_norm': 0.0020171498046958917, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:15<10:53, 3.87s/it] 68%|██████▊ | 352/520 [22:19<10:47, 3.85s/it] {'loss': 1.35, 'grad_norm': 0.0021333981380962895, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:19<10:47, 3.85s/it] 68%|██████▊ | 353/520 [22:22<10:42, 3.85s/it] {'loss': 1.4024, 'grad_norm': 0.0018511774138146877, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:22<10:42, 3.85s/it] 68%|██████▊ | 354/520 [22:26<10:38, 3.85s/it] {'loss': 1.5484, 'grad_norm': 0.0020812546074676585, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:26<10:38, 3.85s/it] 68%|██████▊ | 355/520 [22:30<10:34, 3.84s/it] {'loss': 1.291, 'grad_norm': 0.0020007267024248227, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:30<10:34, 3.84s/it] 68%|██████▊ | 356/520 [22:34<10:29, 3.84s/it] {'loss': 1.2847, 'grad_norm': 0.0021731556090128783, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:34<10:29, 3.84s/it] 69%|██████▊ | 357/520 [22:38<10:25, 3.83s/it] {'loss': 1.2915, 'grad_norm': 0.0018726736676978484, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:38<10:25, 3.83s/it] 69%|██████▉ | 358/520 [22:42<10:21, 3.84s/it] {'loss': 1.2342, 'grad_norm': 0.002272517580697083, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:42<10:21, 3.84s/it] 69%|██████▉ | 359/520 [22:45<10:18, 3.84s/it] {'loss': 1.4756, 'grad_norm': 
0.002262913628348657, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:45<10:18, 3.84s/it] 69%|██████▉ | 360/520 [22:49<10:13, 3.84s/it] {'loss': 1.4894, 'grad_norm': 0.0022492707684779213, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:49<10:13, 3.84s/it] 69%|██████▉ | 361/520 [22:53<10:09, 3.83s/it] {'loss': 1.4758, 'grad_norm': 0.002002324700712495, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:53<10:09, 3.83s/it] 70%|██████▉ | 362/520 [22:57<10:06, 3.84s/it] {'loss': 1.2825, 'grad_norm': 0.002327336878820488, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:57<10:06, 3.84s/it] 70%|██████▉ | 363/520 [23:01<10:04, 3.85s/it] {'loss': 1.3429, 'grad_norm': 0.002158199608059703, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:01<10:04, 3.85s/it] 70%|███████ | 364/520 [23:05<10:01, 3.86s/it] {'loss': 1.5052, 'grad_norm': 0.0022147596022748053, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [23:05<10:01, 3.86s/it] 70%|███████ | 365/520 [23:09<09:58, 3.86s/it] {'loss': 1.3975, 'grad_norm': 0.002235266145591204, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [23:09<09:58, 3.86s/it] 70%|███████ | 366/520 [23:12<09:54, 3.86s/it] {'loss': 1.3481, 'grad_norm': 0.001988290725642302, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:12<09:54, 3.86s/it] 71%|███████ | 367/520 [23:16<09:54, 3.89s/it] {'loss': 1.3559, 'grad_norm': 0.002287382488768957, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:16<09:54, 3.89s/it] 71%|███████ | 368/520 [23:20<09:42, 3.84s/it] {'loss': 1.1926, 'grad_norm': 0.0022580797007444044, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:20<09:42, 3.84s/it] 71%|███████ | 369/520 [23:24<09:33, 3.80s/it] {'loss': 1.4644, 'grad_norm': 0.0021226259942788315, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:24<09:33, 3.80s/it] 71%|███████ | 370/520 [23:28<09:26, 3.77s/it] {'loss': 1.2597, 'grad_norm': 0.002055567300841107, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:28<09:26, 3.77s/it] 71%|███████▏ | 371/520 [23:31<09:21, 3.77s/it] {'loss': 1.2483, 'grad_norm': 0.002138540186330765, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:31<09:21, 3.77s/it] 72%|███████▏ | 372/520 [23:35<09:26, 3.83s/it] {'loss': 1.5617, 'grad_norm': 0.0020700427457097507, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:35<09:26, 3.83s/it] 72%|███████▏ | 373/520 [23:39<09:27, 3.86s/it] {'loss': 1.4346, 'grad_norm': 0.0023545368158618533, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:39<09:27, 3.86s/it] 72%|███████▏ | 374/520 [23:43<09:21, 3.85s/it] {'loss': 1.3442, 'grad_norm': 0.00224967919427254, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:43<09:21, 3.85s/it] 72%|███████▏ | 375/520 [23:47<09:14, 3.82s/it] {'loss': 1.2354, 'grad_norm': 0.0021173504334543406, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:47<09:14, 3.82s/it] 72%|███████▏ | 376/520 [23:51<09:05, 3.79s/it] {'loss': 1.3656, 'grad_norm': 0.0019304042394760316, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:51<09:05, 3.79s/it] 72%|███████▎ | 377/520 
[23:54<08:58, 3.77s/it] {'loss': 1.3136, 'grad_norm': 0.002192355318919811, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:54<08:58, 3.77s/it] 73%|███████▎ | 378/520 [23:58<08:50, 3.74s/it] {'loss': 1.3553, 'grad_norm': 0.0019541136269539644, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:58<08:50, 3.74s/it] 73%|███████▎ | 379/520 [24:02<08:43, 3.71s/it] {'loss': 1.3544, 'grad_norm': 0.002075476449216743, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:02<08:43, 3.71s/it] 73%|███████▎ | 380/520 [24:05<08:38, 3.70s/it] {'loss': 1.5505, 'grad_norm': 0.0022351321011225356, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:05<08:38, 3.70s/it] 73%|███████▎ | 381/520 [24:09<08:33, 3.70s/it] {'loss': 1.3459, 'grad_norm': 0.0020760376689149236, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:09<08:33, 3.70s/it] 73%|███████▎ | 382/520 [24:13<08:29, 3.69s/it] {'loss': 1.4733, 'grad_norm': 0.0023080342553576058, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:13<08:29, 3.69s/it] 74%|███████▎ | 383/520 [24:16<08:25, 3.69s/it] {'loss': 1.1854, 'grad_norm': 0.0023477562008040525, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:16<08:25, 3.69s/it] 74%|███████▍ | 384/520 [24:20<08:20, 3.68s/it] {'loss': 1.6607, 'grad_norm': 0.002237353459245763, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:20<08:20, 3.68s/it] 74%|███████▍ | 385/520 [24:24<08:17, 3.69s/it] {'loss': 1.3232, 'grad_norm': 0.0019277777332896262, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:24<08:17, 3.69s/it] 74%|███████▍ | 386/520 [24:27<08:13, 3.68s/it] {'loss': 1.2597, 'grad_norm': 0.0018656551558097628, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:27<08:13, 3.68s/it] 74%|███████▍ | 387/520 [24:31<08:10, 3.69s/it] {'loss': 1.5655, 'grad_norm': 0.002135652377807778, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:31<08:10, 3.69s/it] 75%|███████▍ | 388/520 [24:35<08:08, 3.70s/it] {'loss': 1.2161, 'grad_norm': 0.0019561651460359426, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:35<08:08, 3.70s/it] 75%|███████▍ | 389/520 [24:38<08:04, 3.70s/it] {'loss': 1.2814, 'grad_norm': 0.0023950604382821676, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:38<08:04, 3.70s/it] 75%|███████▌ | 390/520 [24:42<08:00, 3.70s/it] {'loss': 1.3434, 'grad_norm': 0.002049246820728304, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:42<08:00, 3.70s/it] 75%|███████▌ | 391/520 [24:46<07:57, 3.70s/it] {'loss': 1.4369, 'grad_norm': 0.002172365749333074, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:46<07:57, 3.70s/it] 75%|███████▌ | 392/520 [24:50<07:55, 3.71s/it] {'loss': 1.2302, 'grad_norm': 0.0019887993562892497, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:50<07:55, 3.71s/it] 76%|███████▌ | 393/520 [24:53<07:51, 3.71s/it] {'loss': 1.3498, 'grad_norm': 0.0019876591929663045, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:53<07:51, 3.71s/it] 76%|███████▌ | 394/520 [24:57<07:45, 3.70s/it] {'loss': 1.2992, 'grad_norm': 0.002205656087756363, 'learning_rate': 0.029289321881345254, 
'epoch': 0.76} + 76%|███████▌ | 394/520 [24:57<07:45, 3.70s/it] 76%|███████▌ | 395/520 [25:01<07:42, 3.70s/it] {'loss': 1.2558, 'grad_norm': 0.0020915393175767773, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:01<07:42, 3.70s/it] 76%|███████▌ | 396/520 [25:04<07:37, 3.69s/it] {'loss': 1.3511, 'grad_norm': 0.0024186142902110387, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:04<07:37, 3.69s/it] 76%|███████▋ | 397/520 [25:08<07:32, 3.68s/it] {'loss': 1.3364, 'grad_norm': 0.001983793893700884, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:08<07:32, 3.68s/it] 77%|███████▋ | 398/520 [25:12<07:28, 3.68s/it] {'loss': 1.3122, 'grad_norm': 0.002148096382549023, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:12<07:28, 3.68s/it] 77%|███████▋ | 399/520 [25:15<07:26, 3.69s/it] {'loss': 1.4089, 'grad_norm': 0.0021877486631017794, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:15<07:26, 3.69s/it] 77%|███████▋ | 400/520 [25:19<07:23, 3.69s/it] {'loss': 1.4776, 'grad_norm': 0.0022800437395368188, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:19<07:23, 3.69s/it] 77%|███████▋ | 401/520 [25:23<07:19, 3.69s/it] {'loss': 1.1253, 'grad_norm': 0.0021536222245117365, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:23<07:19, 3.69s/it] 77%|███████▋ | 402/520 [25:26<07:14, 3.68s/it] {'loss': 1.2514, 'grad_norm': 0.0019815967003650944, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:26<07:14, 3.68s/it] 78%|███████▊ | 403/520 [25:30<07:15, 3.72s/it] {'loss': 1.3016, 'grad_norm': 0.002262037588191985, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:30<07:15, 3.72s/it] 78%|███████▊ | 404/520 [25:34<07:13, 3.74s/it] {'loss': 1.2075, 'grad_norm': 0.002449376915087528, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:34<07:13, 3.74s/it] 78%|███████▊ | 405/520 [25:38<07:08, 3.73s/it] {'loss': 1.4197, 'grad_norm': 0.0021685171695943954, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:38<07:08, 3.73s/it] 78%|███████▊ | 406/520 [25:41<07:03, 3.72s/it] {'loss': 1.3486, 'grad_norm': 0.002694047501865988, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:41<07:03, 3.72s/it] 78%|███████▊ | 407/520 [25:45<06:59, 3.72s/it] {'loss': 1.4, 'grad_norm': 0.0023729826577415197, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:45<06:59, 3.72s/it] 78%|███████▊ | 408/520 [25:49<06:54, 3.70s/it] {'loss': 1.2712, 'grad_norm': 0.0022234624079614635, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:49<06:54, 3.70s/it] 79%|███████▊ | 409/520 [25:52<06:49, 3.69s/it] {'loss': 1.4208, 'grad_norm': 0.0023835551120861584, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:52<06:49, 3.69s/it] 79%|███████▉ | 410/520 [25:56<06:46, 3.69s/it] {'loss': 1.1247, 'grad_norm': 0.00218825388502061, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:56<06:46, 3.69s/it] 79%|███████▉ | 411/520 [26:00<06:42, 3.69s/it] {'loss': 1.3867, 'grad_norm': 0.0022906965577336957, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:00<06:42, 3.69s/it] 79%|███████▉ | 412/520 [26:04<06:39, 3.70s/it] {'loss': 
1.3023, 'grad_norm': 0.002194740733520537, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:04<06:39, 3.70s/it] 79%|███████▉ | 413/520 [26:07<06:36, 3.71s/it] {'loss': 1.4792, 'grad_norm': 0.002695249940266025, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:07<06:36, 3.71s/it] 80%|███████▉ | 414/520 [26:11<06:32, 3.70s/it] {'loss': 1.221, 'grad_norm': 0.001935480247433722, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:11<06:32, 3.70s/it] 80%|███████▉ | 415/520 [26:15<06:28, 3.70s/it] {'loss': 1.2755, 'grad_norm': 0.001916545595747216, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:15<06:28, 3.70s/it] 80%|████████ | 416/520 [26:18<06:24, 3.70s/it] {'loss': 1.1816, 'grad_norm': 0.002414063430837412, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:18<06:24, 3.70s/it] 80%|████████ | 417/520 [26:22<06:20, 3.69s/it] {'loss': 1.3574, 'grad_norm': 0.0021678149972009816, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:22<06:20, 3.69s/it] 80%|████████ | 418/520 [26:26<06:17, 3.70s/it] {'loss': 1.3554, 'grad_norm': 0.00188512954746494, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:26<06:17, 3.70s/it] 81%|████████ | 419/520 [26:29<06:13, 3.70s/it] {'loss': 1.328, 'grad_norm': 0.002264246844273998, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:29<06:13, 3.70s/it] 81%|████████ | 420/520 [26:33<06:10, 3.70s/it] {'loss': 1.2022, 'grad_norm': 0.0021669760978161455, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:33<06:10, 3.70s/it] 81%|████████ | 421/520 [26:37<06:06, 3.70s/it] {'loss': 1.1383, 'grad_norm': 0.002380608350080244, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:37<06:06, 3.70s/it] 81%|████████ | 422/520 [26:41<06:02, 3.69s/it] {'loss': 1.2724, 'grad_norm': 0.0021489179457485573, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:41<06:02, 3.69s/it] 81%|████████▏ | 423/520 [26:44<05:57, 3.69s/it] {'loss': 1.2683, 'grad_norm': 0.002344185774210196, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:44<05:57, 3.69s/it] 82%|████████▏ | 424/520 [26:48<05:54, 3.69s/it] {'loss': 1.5563, 'grad_norm': 0.0022927641861878816, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:48<05:54, 3.69s/it] 82%|████████▏ | 425/520 [26:52<05:49, 3.68s/it] {'loss': 1.2664, 'grad_norm': 0.002201564647793242, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:52<05:49, 3.68s/it] 82%|████████▏ | 426/520 [26:55<05:47, 3.70s/it] {'loss': 1.3062, 'grad_norm': 0.0027773005505203644, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:55<05:47, 3.70s/it] 82%|████████▏ | 427/520 [26:59<05:43, 3.69s/it] {'loss': 1.2129, 'grad_norm': 0.0022758809292129506, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:59<05:43, 3.69s/it] 82%|████████▏ | 428/520 [27:03<05:38, 3.68s/it] {'loss': 1.1755, 'grad_norm': 0.002175381839468784, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:03<05:38, 3.68s/it] 82%|████████▎ | 429/520 [27:06<05:35, 3.69s/it] {'loss': 1.2877, 'grad_norm': 0.0020947458441729024, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 
429/520 [27:06<05:35, 3.69s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:10<05:31, 3.68s/it] {'loss': 1.2784, 'grad_norm': 0.0019764921628174954, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:10<05:31, 3.68s/it] 83%|████████▎ | 431/520 [27:14<05:28, 3.69s/it] {'loss': 1.4328, 'grad_norm': 0.002318794945277179, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:14<05:28, 3.69s/it] 83%|████████▎ | 432/520 [27:17<05:25, 3.70s/it] {'loss': 1.1803, 'grad_norm': 0.0020319184995710913, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:17<05:25, 3.70s/it] 83%|████████▎ | 433/520 [27:21<05:21, 3.70s/it] {'loss': 1.3298, 'grad_norm': 0.002021635074368023, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:21<05:21, 3.70s/it] 83%|████████▎ | 434/520 [27:25<05:18, 3.70s/it] {'loss': 1.0486, 'grad_norm': 0.002012889320293248, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:25<05:18, 3.70s/it] 84%|████████▎ | 435/520 [27:28<05:13, 3.68s/it] {'loss': 1.3791, 'grad_norm': 0.0022898950036703697, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:28<05:13, 3.68s/it] 84%|████████▍ | 436/520 [27:32<05:09, 3.68s/it] {'loss': 1.1532, 'grad_norm': 0.0021286873111460796, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:32<05:09, 3.68s/it] 84%|████████▍ | 437/520 [27:36<05:05, 3.68s/it] {'loss': 1.4032, 'grad_norm': 0.002136737242451922, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:36<05:05, 3.68s/it] 84%|████████▍ | 438/520 [27:40<05:01, 3.68s/it] {'loss': 1.1832, 'grad_norm': 0.0020653333188328663, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:40<05:01, 3.68s/it] 84%|████████▍ | 439/520 [27:43<04:58, 3.68s/it] {'loss': 1.3756, 'grad_norm': 0.0019123001712321294, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:43<04:58, 3.68s/it] 85%|████████▍ | 440/520 [27:47<04:54, 3.68s/it] {'loss': 1.2534, 'grad_norm': 0.002105397286633185, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:47<04:54, 3.68s/it] 85%|████████▍ | 441/520 [27:51<04:51, 3.69s/it] {'loss': 1.4249, 'grad_norm': 0.002181512518936083, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:51<04:51, 3.69s/it] 85%|████████▌ | 442/520 [27:54<04:47, 3.69s/it] {'loss': 1.3003, 'grad_norm': 0.0025493084667517385, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:54<04:47, 3.69s/it] 85%|████████▌ | 443/520 [27:58<04:44, 3.69s/it] {'loss': 1.3269, 'grad_norm': 0.0020030211452230174, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:58<04:44, 3.69s/it] 85%|████████▌ | 444/520 [28:02<04:41, 3.71s/it] {'loss': 1.2917, 'grad_norm': 0.0019248555996648961, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:02<04:41, 3.71s/it] 86%|████████▌ | 445/520 [28:05<04:37, 3.70s/it] {'loss': 1.2045, 'grad_norm': 0.0019934591255468983, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:05<04:37, 3.70s/it] 86%|████████▌ | 446/520 [28:09<04:33, 3.70s/it] {'loss': 1.5, 
'grad_norm': 0.0020271654052361953, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:09<04:33, 3.70s/it] 86%|████████▌ | 447/520 [28:13<04:29, 3.69s/it] {'loss': 1.3074, 'grad_norm': 0.0021198667462413033, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:13<04:29, 3.69s/it] 86%|████████▌ | 448/520 [28:16<04:25, 3.69s/it] {'loss': 1.2788, 'grad_norm': 0.0024355784690692213, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:16<04:25, 3.69s/it] 86%|████████▋ | 449/520 [28:20<04:21, 3.69s/it] {'loss': 1.4591, 'grad_norm': 0.0022303426317293613, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:20<04:21, 3.69s/it] 87%|████████▋ | 450/520 [28:24<04:17, 3.68s/it] {'loss': 1.3312, 'grad_norm': 0.002181758105022103, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:24<04:17, 3.68s/it] 87%|████████▋ | 451/520 [28:28<04:14, 3.69s/it] {'loss': 1.3197, 'grad_norm': 0.002137229536121581, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:28<04:14, 3.69s/it] 87%|████████▋ | 452/520 [28:31<04:14, 3.75s/it] {'loss': 1.5001, 'grad_norm': 0.0021146119700528156, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:31<04:14, 3.75s/it] 87%|████████▋ | 453/520 [28:35<04:13, 3.78s/it] {'loss': 1.4676, 'grad_norm': 0.002205149945498312, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:35<04:13, 3.78s/it] 87%|████████▋ | 454/520 [28:39<04:10, 3.80s/it] {'loss': 1.216, 'grad_norm': 0.00206321243035161, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:39<04:10, 3.80s/it] 88%|████████▊ | 455/520 [28:43<04:07, 3.81s/it] {'loss': 1.3723, 'grad_norm': 0.002128728518124758, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:43<04:07, 3.81s/it] 88%|████████▊ | 456/520 [28:47<04:01, 3.77s/it] {'loss': 1.273, 'grad_norm': 0.0021346769577922585, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:47<04:01, 3.77s/it] 88%|████████▊ | 457/520 [28:50<03:55, 3.74s/it] {'loss': 1.5079, 'grad_norm': 0.002047833246375139, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:50<03:55, 3.74s/it] 88%|████████▊ | 458/520 [28:54<03:50, 3.72s/it] {'loss': 1.4406, 'grad_norm': 0.0024419050194288304, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:54<03:50, 3.72s/it] 88%|████████▊ | 459/520 [28:58<03:46, 3.71s/it] {'loss': 1.341, 'grad_norm': 0.0021203002918352334, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:58<03:46, 3.71s/it] 88%|████████▊ | 460/520 [29:01<03:41, 3.69s/it] {'loss': 1.2157, 'grad_norm': 0.00207800735692088, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:01<03:41, 3.69s/it] 89%|████████▊ | 461/520 [29:05<03:38, 3.70s/it] {'loss': 1.5905, 'grad_norm': 0.0018876500418458677, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:05<03:38, 3.70s/it] 89%|████████▉ | 462/520 [29:09<03:33, 3.68s/it] {'loss': 1.5392, 'grad_norm': 0.002320024334856487, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:09<03:33, 3.68s/it] 89%|████████▉ | 463/520 [29:12<03:29, 3.67s/it] {'loss': 1.1754, 'grad_norm': 0.0021980514835818915, 'learning_rate': 0.006245754145600091, 
'epoch': 0.89} + 89%|████████▉ | 463/520 [29:12<03:29, 3.67s/it] 89%|████████▉ | 464/520 [29:16<03:26, 3.68s/it] {'loss': 1.3515, 'grad_norm': 0.002166291634937126, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:16<03:26, 3.68s/it] 89%|████████▉ | 465/520 [29:20<03:22, 3.68s/it] {'loss': 1.4531, 'grad_norm': 0.0022309012645780704, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:20<03:22, 3.68s/it] 90%|████████▉ | 466/520 [29:23<03:18, 3.68s/it] {'loss': 1.3319, 'grad_norm': 0.0019475788553093938, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:23<03:18, 3.68s/it] 90%|████████▉ | 467/520 [29:27<03:15, 3.68s/it] {'loss': 1.4158, 'grad_norm': 0.0020794040755203014, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:27<03:15, 3.68s/it] 90%|█████████ | 468/520 [29:31<03:11, 3.67s/it] {'loss': 1.3063, 'grad_norm': 0.002363265552148259, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:31<03:11, 3.67s/it] 90%|█████████ | 469/520 [29:34<03:07, 3.67s/it] {'loss': 1.3687, 'grad_norm': 0.002276395051369535, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:34<03:07, 3.67s/it] 90%|█████████ | 470/520 [29:38<03:03, 3.66s/it] {'loss': 1.222, 'grad_norm': 0.0020246267613686867, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:38<03:03, 3.66s/it] 91%|█████████ | 471/520 [29:42<02:59, 3.67s/it] {'loss': 1.2523, 'grad_norm': 0.0021659119975561145, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:42<02:59, 3.67s/it] 91%|█████████ | 472/520 [29:45<02:56, 3.68s/it] {'loss': 1.2159, 'grad_norm': 0.0022465181805641095, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:45<02:56, 3.68s/it] 91%|█████████ | 473/520 [29:49<02:53, 3.68s/it] {'loss': 1.2833, 'grad_norm': 0.0021736751174713516, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:49<02:53, 3.68s/it] 91%|█████████ | 474/520 [29:53<02:49, 3.68s/it] {'loss': 1.4513, 'grad_norm': 0.002145113545230208, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:53<02:49, 3.68s/it] 91%|█████████▏| 475/520 [29:57<02:47, 3.72s/it] {'loss': 1.3657, 'grad_norm': 0.002061604303033055, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:57<02:47, 3.72s/it] 92%|█████████▏| 476/520 [30:00<02:45, 3.77s/it] {'loss': 1.2744, 'grad_norm': 0.002185007350047715, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:00<02:45, 3.77s/it] 92%|█████████▏| 477/520 [30:04<02:43, 3.80s/it] {'loss': 1.2614, 'grad_norm': 0.0022203535785215037, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:04<02:43, 3.80s/it] 92%|█████████▏| 478/520 [30:08<02:40, 3.83s/it] {'loss': 1.2176, 'grad_norm': 0.002099642444670299, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:08<02:40, 3.83s/it] 92%|█████████▏| 479/520 [30:12<02:37, 3.85s/it] {'loss': 1.4507, 'grad_norm': 0.002268190593256614, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:12<02:37, 3.85s/it] 92%|█████████▏| 480/520 [30:16<02:34, 3.87s/it] {'loss': 1.4552, 'grad_norm': 0.002112386135207085, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:16<02:34, 3.87s/it] 92%|█████████▎| 
481/520 [30:20<02:31, 3.87s/it] {'loss': 1.4756, 'grad_norm': 0.002104794494132047, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:20<02:31, 3.87s/it] 93%|█████████▎| 482/520 [30:24<02:27, 3.88s/it] {'loss': 1.4708, 'grad_norm': 0.0022106063050647737, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:24<02:27, 3.88s/it] 93%|█████████▎| 483/520 [30:28<02:23, 3.89s/it] {'loss': 1.308, 'grad_norm': 0.0020953621212241136, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:28<02:23, 3.89s/it] 93%|█████████▎| 484/520 [30:32<02:22, 3.95s/it] {'loss': 1.2938, 'grad_norm': 0.0021558117862140293, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:32<02:22, 3.95s/it] 93%|█████████▎| 485/520 [30:36<02:19, 3.99s/it] {'loss': 1.244, 'grad_norm': 0.0020556389825310255, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:36<02:19, 3.99s/it] 93%|█████████▎| 486/520 [30:40<02:16, 4.02s/it] {'loss': 1.3787, 'grad_norm': 0.0022055860706460513, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:40<02:16, 4.02s/it] 94%|█████████▎| 487/520 [30:44<02:13, 4.04s/it] {'loss': 1.216, 'grad_norm': 0.002020784061361674, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:44<02:13, 4.04s/it] 94%|█████████▍| 488/520 [30:48<02:09, 4.05s/it] {'loss': 1.1553, 'grad_norm': 0.0021541476121929902, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:48<02:09, 4.05s/it] 94%|█████████▍| 489/520 [30:52<02:04, 4.02s/it] {'loss': 1.4532, 'grad_norm': 0.0018720870982768024, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:52<02:04, 4.02s/it] 94%|█████████▍| 490/520 [30:56<01:59, 3.99s/it] {'loss': 1.2943, 'grad_norm': 0.0021690317680111283, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:56<01:59, 3.99s/it] 94%|█████████▍| 491/520 [31:00<01:54, 3.97s/it] {'loss': 1.2488, 'grad_norm': 0.002219365496646349, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:00<01:54, 3.97s/it] 95%|█████████▍| 492/520 [31:04<01:50, 3.94s/it] {'loss': 1.3809, 'grad_norm': 0.00225046348211799, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:04<01:50, 3.94s/it] 95%|█████████▍| 493/520 [31:08<01:46, 3.94s/it] {'loss': 1.5336, 'grad_norm': 0.002315226420978888, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:08<01:46, 3.94s/it] 95%|█████████▌| 494/520 [31:12<01:42, 3.94s/it] {'loss': 1.3114, 'grad_norm': 0.0019296332100055727, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:12<01:42, 3.94s/it] 95%|█████████▌| 495/520 [31:16<01:38, 3.93s/it] {'loss': 1.2621, 'grad_norm': 0.002048924273460424, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:16<01:38, 3.93s/it] 95%|█████████▌| 496/520 [31:20<01:34, 3.93s/it] {'loss': 1.1851, 'grad_norm': 0.0023466301307910327, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:20<01:34, 3.93s/it] 96%|█████████▌| 497/520 [31:23<01:30, 3.93s/it] {'loss': 1.3693, 'grad_norm': 0.0019589473268475043, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:23<01:30, 3.93s/it] 96%|█████████▌| 498/520 [31:27<01:25, 3.88s/it] {'loss': 1.2714, 'grad_norm': 
0.002165971588546064, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:27<01:25, 3.88s/it] 96%|█████████▌| 499/520 [31:31<01:20, 3.84s/it] {'loss': 1.5421, 'grad_norm': 0.002269832551005721, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:31<01:20, 3.84s/it] 96%|█████████▌| 500/520 [31:35<01:16, 3.80s/it] {'loss': 1.3898, 'grad_norm': 0.002351215941498668, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:35<01:16, 3.80s/it] 96%|█████████▋| 501/520 [31:38<01:11, 3.78s/it] {'loss': 1.464, 'grad_norm': 0.0025699819730308155, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:38<01:11, 3.78s/it] 97%|█████████▋| 502/520 [31:42<01:07, 3.75s/it] {'loss': 1.2963, 'grad_norm': 0.0020343211081920998, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:42<01:07, 3.75s/it] 97%|█████████▋| 503/520 [31:46<01:03, 3.73s/it] {'loss': 1.4103, 'grad_norm': 0.0021604858737942134, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:46<01:03, 3.73s/it] 97%|█████████▋| 504/520 [31:49<00:59, 3.72s/it] {'loss': 1.307, 'grad_norm': 0.0024191446790144547, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:49<00:59, 3.72s/it] 97%|█████████▋| 505/520 [31:53<00:55, 3.71s/it] {'loss': 1.3503, 'grad_norm': 0.00224745245743493, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:53<00:55, 3.71s/it] 97%|█████████▋| 506/520 [31:57<00:51, 3.70s/it] {'loss': 1.2597, 'grad_norm': 0.0022104014074993505, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:57<00:51, 3.70s/it] 98%|█████████▊| 507/520 [32:01<00:48, 3.71s/it] {'loss': 1.5777, 'grad_norm': 0.00205325504570017, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:01<00:48, 3.71s/it] 98%|█████████▊| 508/520 [32:04<00:44, 3.71s/it] {'loss': 1.3808, 'grad_norm': 0.0021040613081452935, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:04<00:44, 3.71s/it] 98%|█████████▊| 509/520 [32:08<00:40, 3.72s/it] {'loss': 1.3381, 'grad_norm': 0.0021208816591366726, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:08<00:40, 3.72s/it] 98%|█████████▊| 510/520 [32:12<00:37, 3.72s/it] {'loss': 1.3006, 'grad_norm': 0.002198746667401141, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:12<00:37, 3.72s/it] 98%|█████████▊| 511/520 [32:15<00:33, 3.71s/it] {'loss': 1.2656, 'grad_norm': 0.0020063160846192376, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:15<00:33, 3.71s/it] 98%|█████████▊| 512/520 [32:19<00:29, 3.71s/it] {'loss': 1.1454, 'grad_norm': 0.0020898656917165454, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:19<00:29, 3.71s/it] 99%|█████████▊| 513/520 [32:23<00:25, 3.70s/it] {'loss': 1.3523, 'grad_norm': 0.002306496131497524, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:23<00:25, 3.70s/it] 99%|█████████▉| 514/520 [32:27<00:22, 3.70s/it] {'loss': 1.3341, 'grad_norm': 0.0018961904758268743, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:27<00:22, 3.70s/it] 99%|█████████▉| 515/520 [32:30<00:18, 3.69s/it] {'loss': 1.395, 'grad_norm': 0.0024455359398535396, 'learning_rate': 4.856389714723575e-05, 
'epoch': 0.99} + 99%|█████████▉| 515/520 [32:30<00:18, 3.69s/it] 99%|█████████▉| 516/520 [32:34<00:14, 3.70s/it] {'loss': 1.2597, 'grad_norm': 0.0020347353048133323, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:34<00:14, 3.70s/it] 99%|█████████▉| 517/520 [32:38<00:11, 3.68s/it] {'loss': 1.4819, 'grad_norm': 0.0023040711826734445, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:38<00:11, 3.68s/it] 100%|█████████▉| 518/520 [32:41<00:07, 3.67s/it] {'loss': 1.3083, 'grad_norm': 0.0021173559192517533, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:41<00:07, 3.67s/it] 100%|█████████▉| 519/520 [32:45<00:03, 3.67s/it] {'loss': 1.4453, 'grad_norm': 0.0021046875527712954, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:45<00:03, 3.67s/it]Traceback (most recent call last): + File "/opt/conda/envs/tinyllava/lib/python3.10/multiprocessing/queues.py", line 251, in _feed + send_bytes(obj) + File "/opt/conda/envs/tinyllava/lib/python3.10/multiprocessing/connection.py", line 205, in send_bytes + self._send_bytes(m[offset:offset + size]) + File "/opt/conda/envs/tinyllava/lib/python3.10/multiprocessing/connection.py", line 416, in _send_bytes + self._send(header + buf) + File "/opt/conda/envs/tinyllava/lib/python3.10/multiprocessing/connection.py", line 373, in _send + n = write(self._handle, buf) +BrokenPipeError: [Errno 32] Broken pipe + 100%|██████████| 520/520 [32:50<00:00, 3.96s/it] {'loss': 1.5199, 'grad_norm': 0.002401350061747761, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:50<00:00, 3.96s/it] {'train_runtime': 1970.0515, 'train_samples_per_second': 33.77, 'train_steps_per_second': 0.264, 'train_loss': 1.4627838684962347, 'epoch': 1.0} + 100%|██████████| 520/520 [32:50<00:00, 3.96s/it] 100%|██████████| 520/520 [32:50<00:00, 3.79s/it] +[2025-10-10 06:42:17,526] [INFO] [launch.py:348:main] Process 564847 exits successfully. +[2025-10-10 06:42:18,528] [INFO] [launch.py:348:main] Process 564844 exits successfully. +[2025-10-10 06:42:18,529] [INFO] [launch.py:348:main] Process 564845 exits successfully. +[2025-10-10 06:42:18,529] [INFO] [launch.py:348:main] Process 564850 exits successfully. +[2025-10-10 06:42:18,530] [INFO] [launch.py:348:main] Process 564849 exits successfully. +[2025-10-10 06:42:18,530] [INFO] [launch.py:348:main] Process 564848 exits successfully. +[2025-10-10 06:42:19,532] [INFO] [launch.py:348:main] Process 564846 exits successfully. +[2025-10-10 06:42:22,536] [INFO] [launch.py:348:main] Process 564843 exits successfully. 
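[Editor's note on the mid-run tokenizer warning "Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048)": the slow Hugging Face tokenizer emits this when a sequence is encoded without truncation and exceeds model_max_length; the run continued normally, which suggests the pipeline truncates downstream. A minimal sketch of capping such inputs explicitly, assuming a stock AutoTokenizer rather than this repo's actual preprocessing code:

    from transformers import AutoTokenizer

    # Hypothetical illustration, not the repo's preprocessing pipeline.
    tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", use_fast=False)
    very_long_text = "describe the image in detail. " * 500
    ids = tok(very_long_text, truncation=True, max_length=2048)["input_ids"]
    assert len(ids) <= 2048  # capped at --model_max_length, no over-length warning

]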
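[Editor's note: the run's numbers are internally consistent. The logged learning-rate tail (1.9427e-06 at step 519, 0.0 at step 520) follows cosine decay with linear warmup, and the summary's throughput follows from the launch flags (8 ranks x per-device batch 4 x grad-accum 4 = 128 samples per optimizer step). A minimal sanity-check sketch, assuming HF Trainer semantics for warmup (ceil(warmup_ratio * total_steps)); the sample count is an assumption back-solved from the logged throughput and consistent with --train_data_ratio 0.1 of the ~665.3k-example mix665k file:

    import math

    total_steps = 520
    base_lr = 2e-1                            # --learning_rate
    warmup = math.ceil(0.03 * total_steps)    # --warmup_ratio 0.03 -> 16 steps

    def cosine_lr(step: int) -> float:
        # Linear warmup, then cosine decay to zero (the shape of HF's
        # get_cosine_schedule_with_warmup).
        if step < warmup:
            return base_lr * step / max(1, warmup)
        progress = (step - warmup) / max(1, total_steps - warmup)
        return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(cosine_lr(519))   # ~1.943e-06 vs. 1.9427e-06 logged (~0.01% apart)
    print(cosine_lr(520))   # 0.0, as logged at the final step

    samples = 66_529        # assumed: ~0.1 of mix665k, back-solved from summary
    runtime = 1970.0515     # 'train_runtime' from the summary
    print(math.ceil(samples / 128))         # 520 optimizer steps, as logged
    print(round(samples / runtime, 2))      # 33.77 'train_samples_per_second'
    print(round(total_steps / runtime, 3))  # 0.264 'train_steps_per_second'

The BrokenPipeError traceback just before the final step is a multiprocessing queue feeder failing to write after its reader went away, which fits dataloader workers being torn down at end of training rather than a training failure; the summary lines after it confirm the run finished.]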
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_060753.log +Timestamp: 2025-10-10 06:42:24 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_164158.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_164158.log new file mode 100644 index 0000000000000000000000000000000000000000..2c876ba874f0a70f0d31672ae0485f5680dc54a0 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_164158.log @@ -0,0 +1,7 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.3_2e-1_connector-1.0_0.3_2e-1_ablation_20251010_164158.log +Timestamp: 2025-10-10 16:41:58 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 16:42:00,884] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_055850.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_055850.log new file mode 100644 index 0000000000000000000000000000000000000000..1ec2fe0bc4ee6effbbed0c277b65847aaef636c7 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_055850.log @@ -0,0 +1,9 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_055850.log +Timestamp: 2025-10-10 05:58:50 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 05:58:53,031] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:55,712] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. 
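[Editor's note: the deepspeed runner line below packs the node-to-GPU topology into --world_info as base64-encoded JSON; decoding it reproduces the dict the launcher later echoes as WORLD INFO DICT:

    import base64
    import json

    blob = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
    print(json.loads(base64.b64decode(blob)))
    # {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}  -> one node driving GPUs 0-7

]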
+[2025-10-10 05:58:55,713] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060007.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060007.log new file mode 100644 index 0000000000000000000000000000000000000000..1d1a60b3f869592d74c8f1d5cde2c0685c2e3a88 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060007.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060007.log +Timestamp: 2025-10-10 06:00:07 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:00:10,102] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:12,794] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:00:12,795] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:00:15,353] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:16,377] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:00:16,378] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:00:16,378] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:00:16,378] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:00:16,378] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:00:16,378] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:00:16,378] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:00:16,380] [INFO] [launch.py:253:main] process 552682 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:16,382] [INFO] [launch.py:253:main] 
process 552683 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:16,384] [INFO] [launch.py:253:main] process 552684 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', 
'--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:16,386] [INFO] [launch.py:253:main] process 552685 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', 
'--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:16,388] [INFO] [launch.py:253:main] process 552686 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 
06:00:16,390] [INFO] [launch.py:253:main] process 552687 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:16,392] [INFO] [launch.py:253:main] process 552688 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:16,394] [INFO] [launch.py:253:main] process 552689 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 06:00:23,034] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:00:23,441] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:00:23,860] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
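The dict above is the mask configuration echoed before model construction: soft masks on the LLM's attention and MLP paths (temperature 0.5) and a global soft mask on the connector. The log names only the hyperparameters, not the formula; a common construction for such a temperature-scaled soft mask, offered here purely as an assumption about what these fields control, is a sigmoid over learnable gate logits:

```python
# Assumed form of a "soft" mask with temperature: none of this is taken from the
# TinyLLaVA source; the log only reports mask_type='soft', temperature=0.5, init_mean=1.0.
import torch

def soft_mask(gate_logits: torch.Tensor, temperature: float) -> torch.Tensor:
    """Differentiable gate in (0, 1); smaller temperature -> sharper, more binary."""
    return torch.sigmoid(gate_logits / temperature)

# Gates initialized at the logged init_mean of 1.0, e.g. one per MLP hidden unit
# (intermediate_size is 4864 in the config dump that follows).
gate_logits = torch.full((4864,), 1.0)
mask = soft_mask(gate_logits, temperature=0.5)  # ~0.88 everywhere at init
```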
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
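Apart from the `TinyLlavaConfig` banner, the braced body above is plain JSON, so the masking hyperparameters can be inspected from a saved config without instantiating the model. A small sketch, assuming a config.json under the run's --output_dir mirrors the structure dumped above (the path is hypothetical):

```python
import json

# Hypothetical path: a config.json saved under the run's --output_dir.
with open("config.json") as f:
    cfg = json.load(f)

# Field names exactly as in the dump above.
print(cfg["mask_type_connector"], cfg["temperature_connector"])  # soft 0.5
print(cfg["text_config"]["temperature_attn"],
      cfg["text_config"]["num_hidden_layers"])                   # 0.5 24
```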
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:552682:552682 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:552682:552682 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:552682:552682 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:552682:552682 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:552682:552682 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO ncclCommInitRank comm 0x555bcef67e20 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xaf59d3df4e3f57c8 - Init START
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO comm 0x555bcef67e20 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:552683:554327 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:552683:554327 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:552683:554327 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:552689:554340 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:552689:554340 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:552689:554340 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:552689:554340 [7] 
NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:552689:554340 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:552689:554340 [7] NCCL INFO ncclCommInitRank comm 0x5579f4e4a770 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xaf59d3df4e3f57c8 - Init COMPLETE +ywang29-vrdb-test2-worker-0:552688:554326 [6] NCCL INFO ncclCommInitRank comm 0x55ffb0b3f8f0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xaf59d3df4e3f57c8 - Init COMPLETE +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:552687:554336 [5] NCCL INFO ncclCommInitRank comm 0x55abbe8e4600 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xaf59d3df4e3f57c8 - Init COMPLETE +ywang29-vrdb-test2-worker-0:552683:554327 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:552686:554338 [4] NCCL INFO ncclCommInitRank comm 0x55869f378850 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xaf59d3df4e3f57c8 - Init COMPLETE +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:552683:554327 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:552682:554323 [0] NCCL INFO ncclCommInitRank comm 0x555bcef67e20 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xaf59d3df4e3f57c8 - Init COMPLETE +ywang29-vrdb-test2-worker-0:552683:554327 [1] NCCL INFO ncclCommInitRank comm 0x56192ff91700 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xaf59d3df4e3f57c8 - Init COMPLETE +ywang29-vrdb-test2-worker-0:552685:554339 [3] NCCL INFO ncclCommInitRank comm 0x55c917f31fe0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xaf59d3df4e3f57c8 - Init COMPLETE +ywang29-vrdb-test2-worker-0:552684:554337 [2] NCCL INFO ncclCommInitRank comm 0x55e795ccf5e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xaf59d3df4e3f57c8 - Init COMPLETE +[2025-10-10 06:01:10,777] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 
'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 
'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 
'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 
'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 
'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 
'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 
'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 
'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
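This warning block is expected for this masking recipe rather than a bug: the pretrained language-model checkpoint only ships the original weight and bias tensors, and it is emitted once per rank, so every per-layer scores tensor registered by the masked projections is reported as newly initialized. A minimal sketch of the mechanism, assuming a PyTorch layer along these lines (the class name ScoredLinear and its initializer are illustrative, not TinyLLaVA's actual SupermaskLinearSparsity_SoftForward_Normal):

import torch
import torch.nn as nn

class ScoredLinear(nn.Linear):
    """Illustrative sketch: a Linear that registers an extra learnable
    'scores' tensor. Checkpoints saved before such a layer existed have no
    '<prefix>.scores' keys, so from_pretrained() flags every scores tensor
    as newly initialized and prints the "You should probably TRAIN" hint."""

    def __init__(self, in_features, out_features, bias=True, init_mean=1.0):
        super().__init__(in_features, out_features, bias=bias)
        # One score per weight entry, initialized near init_mean
        # (cf. --init_mean_text 1.0 and the Mean=1.000000 printouts below).
        self.scores = nn.Parameter(
            torch.empty_like(self.weight).normal_(mean=init_mean, std=0.01))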
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 06:01:12,505] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
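The module tree printed next shows every attention and MLP projection in the language model, plus both connector layers, swapped for SupermaskLinearSparsity_SoftForward_Normal. The forward itself is never printed in the log; one plausible reading of the flags (--mask_type_* soft, --init_mean_* 1.0, --temperature_* 0.5 in this run, backward_type normal, i.e. plain autograd rather than a straight-through estimator) is a temperature-sharpened sigmoid gate over the scores applied elementwise to the weight. A hedged sketch under exactly those assumptions:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    # Sketch only: soft supermask with temperature; "normal" backward here
    # means gradients flow through the sigmoid with no straight-through trick.
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.5):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))

    def forward(self, x):
        # Soft mask in (0, 1); lower temperature pushes it toward a hard 0/1 mask.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

Under these assumptions, init_mean 1.0 with temperature 0.5 gives an initial gate of sigmoid(1.0/0.5) ≈ 0.88 on every weight, so the masked model starts close to the dense pretrained network and the sparsity pattern is learned from there; the 0.3-temperature run starts closer still (sigmoid(1.0/0.3) ≈ 0.97).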
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000 
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: 
Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000
+Pre-training init connector._connector.0.scores: Mean=1.000005
+Pre-training init connector._connector.2.scores: Mean=0.999970
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
+    train()
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train
+    data_module = make_supervised_data_module(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module
+    train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__
+    list_data_dict = json.load(open(data_path, "r"))
+FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json'
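Each of the eight ranks raises this same error before the launcher tears the group down: dataset.py eagerly json.load()s the --data_path file at construction time, and the /nfs/ywang29/tinyLLaVA/... (lower-case) tree apparently does not exist on this host; the retry that follows relaunches with --data_path /root/dataset/text_files/llava_v1_5_mix665k.json. A hypothetical pre-flight check (not part of TinyLLaVA's dataset.py) that would fail fast with an actionable message before DeepSpeed spawns workers:

import json
import os

def load_annotations(data_path: str):
    # Hypothetical helper mirroring dataset.py line 30
    # (list_data_dict = json.load(open(data_path, "r"))).
    if not os.path.isfile(data_path):
        raise FileNotFoundError(
            f"--data_path does not exist on this host: {data_path!r}; "
            "verify the dataset mount before launching deepspeed.")
    with open(data_path, "r") as f:
        return json.load(f)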
+[2025-10-10 06:01:15,467] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 552682
+[2025-10-10 06:01:15,469] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 552683
+[2025-10-10 06:01:15,469] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 552684
+[2025-10-10 06:01:15,470] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 552685
+[2025-10-10 06:01:15,472] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 552686
+[2025-10-10 06:01:15,524] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 552687
+[2025-10-10 06:01:15,525] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 552688
+[2025-10-10 06:01:15,526] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 552689
+[2025-10-10 06:01:15,528] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu',
'--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060007.log +Timestamp: 2025-10-10 06:01:16 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060330.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060330.log new file mode 100644 index 0000000000000000000000000000000000000000..4d800982b1e6120494716b501cba92055f7883d9 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060330.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060330.log +Timestamp: 2025-10-10 06:03:30 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:03:33,291] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:03:36,358] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:03:36,359] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import]
+[2025-10-10 06:03:38,963] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:39,996] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 06:03:39,996] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 06:03:39,996] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 06:03:39,996] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 06:03:39,996] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 06:03:39,996] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 06:03:39,996] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 06:03:39,998] [INFO] [launch.py:253:main] process 556971 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 06:03:40,000] [INFO] [launch.py:253:main] process 556972 spawned with
command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:03:40,002] [INFO] [launch.py:253:main] process 556973 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:03:40,004] [INFO] [launch.py:253:main] process 556974 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', 
'--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:03:40,006] [INFO] [launch.py:253:main] process 556975 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:03:40,008] [INFO] [launch.py:253:main] process 556976 spawned with command: 
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:03:40,010] [INFO] [launch.py:253:main] process 556977 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', 
'--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:03:40,012] [INFO] [launch.py:253:main] process 556978 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', 
'--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
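The launcher spawns one copy of tinyllava/train/train.py per GPU, varying only the injected `--local_rank` argument. A minimal sketch of how a training entry point typically consumes that flag; only the argument name comes from the spawn command above, the rest is a generic pattern rather than the actual tinyllava/train/train.py:

```python
# Generic per-rank setup under deepspeed.launcher.launch (illustrative).
import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=0)
args, _ = parser.parse_known_args()

torch.cuda.set_device(args.local_rank)  # pin this process to its own GPU
# DeepSpeed then initializes the NCCL process group across all ranks.
```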
+[2025-10-10 06:03:46,648] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:46,880] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:46,936] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:47,049] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:03:47,072] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:47,072] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:47,072] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:47,072] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:47,089] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:03:47,279] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:03:47,337] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:03:47,337] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 06:03:47,469] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:03:47,469] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:03:47,471] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:03:47,472] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:03:47,484] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:556971:556971 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:556971:556971 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:556971:556971 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:556971:556971 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:556971:556971 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:556971:556971 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO ncclCommInitRank comm 0x557b174bf910 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5342e79d8a5a0f25 - Init START
+ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO ncclCommInitRank comm 0x5555f586bb60 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5342e79d8a5a0f25 - Init START
+ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO ncclCommInitRank comm 0x555b5efc5ba0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5342e79d8a5a0f25 - Init START
+ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO ncclCommInitRank comm 0x55e6cf6c9230 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5342e79d8a5a0f25 - Init START
+ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO ncclCommInitRank comm 0x55dc95dbfca0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5342e79d8a5a0f25 - Init START
+ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO ncclCommInitRank comm 0x55a2d8c51300 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5342e79d8a5a0f25 - Init START
+ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO ncclCommInitRank comm 0x56184cd5ed70 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5342e79d8a5a0f25 - Init START
+ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO ncclCommInitRank comm 0x55d0bd141060 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5342e79d8a5a0f25 - Init START
+ywang29-vrdb-test2-worker-0:556978:558561 [7]
NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO comm 0x5555f586bb60 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO comm 0x55dc95dbfca0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO comm 0x55a2d8c51300 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO comm 0x555b5efc5ba0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO comm 0x55d0bd141060 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO comm 0x56184cd5ed70 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO comm 0x55e6cf6c9230 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO comm 0x557b174bf910 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Trees [0] 
2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO Connected all trees 
+ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:556977:558551 [6] NCCL INFO ncclCommInitRank comm 0x55e6cf6c9230 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5342e79d8a5a0f25 - Init COMPLETE +ywang29-vrdb-test2-worker-0:556978:558561 [7] NCCL INFO ncclCommInitRank comm 0x56184cd5ed70 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5342e79d8a5a0f25 - Init COMPLETE +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:556974:558552 [3] NCCL INFO ncclCommInitRank comm 0x55dc95dbfca0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5342e79d8a5a0f25 - Init COMPLETE +ywang29-vrdb-test2-worker-0:556972:558553 [1] NCCL INFO ncclCommInitRank comm 0x55a2d8c51300 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5342e79d8a5a0f25 - Init COMPLETE +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:556976:558554 [5] NCCL INFO ncclCommInitRank comm 0x557b174bf910 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5342e79d8a5a0f25 - Init COMPLETE +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:556971:558549 [0] NCCL INFO ncclCommInitRank comm 0x55d0bd141060 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5342e79d8a5a0f25 - Init COMPLETE +ywang29-vrdb-test2-worker-0:556973:558570 [2] NCCL INFO ncclCommInitRank comm 0x5555f586bb60 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5342e79d8a5a0f25 - Init COMPLETE +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:556975:558550 [4] NCCL INFO ncclCommInitRank comm 0x555b5efc5ba0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5342e79d8a5a0f25 - Init COMPLETE +[2025-10-10 06:04:34,182] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 
'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 
'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 06:04:35,896] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
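The "newly initialized" warning above is expected for this run: the `.scores` tensors are supermask parameters that sit on top of the pretrained checkpoint's weights, so there is nothing in the checkpoint to load into them and Transformers initializes them fresh. A small sanity check in the style of the "Pre-training init ... Mean=1.000000" lines printed further below might look like the sketch here; the helper name is hypothetical, and the actual logging code is not part of this log.

```python
# Hypothetical helper (not from the TinyLLaVA sources shown in this log):
# after building the model, print the mean of every supermask score tensor.
def report_score_means(model):
    for name, param in model.named_parameters():
        if name.endswith(".scores"):
            print(f"Pre-training init {name}: Mean={param.mean().item():.6f}")
```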
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
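The printout above shows that every attention and MLP projection in the Qwen2 language model, as well as both linear layers of the MLP connector, has been replaced by `SupermaskLinearSparsity_SoftForward_Normal`, while the SigLIP vision tower keeps ordinary `Linear` layers. The implementation of that class is not included in this log; the following is only a minimal sketch consistent with its name and with the score means reported below, and the gating scheme plus the `init_mean` and `temperature` defaults are assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Sketch of a soft-forward supermask layer (illustrative only)."""

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # One learnable score per weight; initializing at init_mean=1.0
        # matches the "Mean=1.000000" init lines in this log.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        # "Soft" forward: scale each weight by a differentiable sigmoid gate
        # instead of a hard 0/1 mask; gradients flow normally through both
        # the weights and the scores.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

A soft mask keeps every weight active while learning per-weight gates; lowering the temperature sharpens the gates toward a binary subnetwork mask.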
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000
Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000 +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + data_module = make_supervised_data_module(tokenizer=tokenizer,train() + + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, 
in make_supervised_data_module + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' +Pre-training init connector._connector.0.scores: Mean=1.000005 +Pre-training init connector._connector.2.scores: Mean=0.999970 +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' +[2025-10-10 06:04:39,082] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 556971 +[2025-10-10 06:04:39,084] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 556972 +[2025-10-10 06:04:39,137] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 556973 +[2025-10-10 
06:04:39,138] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 556974 +[2025-10-10 06:04:39,138] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 556975 +[2025-10-10 06:04:39,139] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 556976 +[2025-10-10 06:04:39,140] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 556977 +[2025-10-10 06:04:39,141] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 556978 +[2025-10-10 06:04:39,234] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_060330.log +Timestamp: 2025-10-10 06:04:40 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_064225.log 
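Note: every rank in the run above died inside LazySupervisedDataset.__init__ because --data_path pointed at /root/dataset/text_files/llava_v1_5_mix665k.json, which does not exist on this worker; the rerun below switches the data and image paths to the mounted /s3-code/ywang29/datasets/tinyllava tree. A minimal pre-flight sketch such as the following, run on the worker before invoking deepspeed, surfaces the problem in seconds instead of after model initialization; the check_paths helper and the hard-coded paths are illustrative, not part of TinyLLaVA:

    import json
    import os
    import sys

    def check_paths(data_path: str, image_folder: str) -> None:
        """Fail fast when the training inputs are not mounted on this worker."""
        if not os.path.isfile(data_path):
            sys.exit(f"--data_path not found: {data_path}")
        if not os.path.isdir(image_folder):
            sys.exit(f"--image_folder not found: {image_folder}")
        with open(data_path, "r") as f:
            records = json.load(f)  # the same call that raised FileNotFoundError above
        print(f"OK: {len(records)} records in {data_path}")

    check_paths("/root/dataset/text_files/llava_v1_5_mix665k.json", "/root/dataset")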
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_064225.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_064225.log
new file mode 100644
index 0000000000000000000000000000000000000000..2847baef2e7a8eaf333f70a134cfba490bcbc38d
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_064225.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_064225.log
+Timestamp: 2025-10-10 06:42:25
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 06:42:27,650] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:42:30,348] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-10 06:42:30,350] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 06:42:32,919] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:42:33,972] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 06:42:33,972] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 06:42:33,972] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 06:42:33,972] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 06:42:33,972] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 06:42:33,972] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 06:42:33,972] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 06:42:33,974] [INFO] [launch.py:253:main] process 587480 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[... processes 587481-587487 (local ranks 1-7) spawned at 06:42:33,976-988 with the same command, differing only in --local_rank; seven near-identical [launch.py:253:main] lines elided ...]
+[... the torch.cuda pynvml FutureWarning shown above is emitted once more by each of the 8 spawned processes; duplicates elided ...]
+[2025-10-10 06:42:40,785] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[... the same [real_accelerator.py:191] line repeats for the remaining 7 ranks between 06:42:40,902 and 06:42:40,952; duplicates elided ...]
+[2025-10-10 06:42:41,183] [INFO] [comm.py:637:init_distributed] cdb=None
+[... [comm.py:637:init_distributed] cdb=None repeats for the remaining 7 ranks between 06:42:41,307 and 06:42:41,358; duplicates elided ...]
+[2025-10-10 06:42:41,348] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
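Note: the per-rank cdb=None lines above mark deepspeed.comm.init_distributed() being entered on each of the 8 processes, with one rank initializing the NCCL TorchBackend. A quick standalone smoke test of the same 8-GPU rendezvous might look like the sketch below, launched with the same launcher (e.g. deepspeed --num_gpus 8 smoke_test.py; the file name and print format are illustrative):

    import os

    import deepspeed
    import torch
    import torch.distributed as dist

    # Same initialization path as the [comm.py:637/668] lines above.
    deepspeed.init_distributed(dist_backend="nccl")
    torch.cuda.set_device(int(os.environ.get("LOCAL_RANK", "0")))

    t = torch.ones(1, device="cuda")
    dist.all_reduce(t)  # defaults to SUM across all ranks
    print(f"rank {dist.get_rank()}/{dist.get_world_size()}: all_reduce -> {t.item()}")  # expect 8.0 here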
+[... "Apply masks for the following modules: ['llm', 'connector']" and the huggingface_hub `resume_download` FutureWarning print once per rank, two copies interleaving mid-line; duplicates elided ...]
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
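Note: the scores tensors whose means are logged at init ("Mean=1.000000" above) are the learnable mask parameters of this masktune recipe; with mask_type=soft they gate weights continuously rather than pruning them outright. The TinyLLaVA-side implementation is not visible in this log, but a minimal sketch of a soft mask with init_mean=1.0 and temperature=0.5, matching the config dict above, could look like this (the SoftMaskedLinear class is an assumption for illustration, not the repository's code):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftMaskedLinear(nn.Linear):
        # Hypothetical: one learnable score per weight, pushed through a
        # temperature-scaled sigmoid and multiplied onto the weight.
        def __init__(self, in_features, out_features, init_mean=1.0, temperature=0.5, **kwargs):
            super().__init__(in_features, out_features, **kwargs)
            self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))
            self.temperature = temperature

        def forward(self, x):
            mask = torch.sigmoid(self.scores / self.temperature)  # soft, differentiable mask in (0, 1)
            return F.linear(x, self.weight * mask, self.bias)

    layer = SoftMaskedLinear(896, 4864)  # Qwen2.5-0.5B hidden_size -> intermediate_size
    print(f"scores: Mean={layer.scores.mean():.6f}")  # prints Mean=1.000000, like the init lines above

A lower temperature sharpens the sigmoid toward a hard 0/1 mask, which is presumably why the ablation sweeps it (0.3 in the failed run above, 0.5 here).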
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+[... "Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.", the torch TypedStorage deprecation UserWarning, and "You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`." each print once per rank, interleaved with the NCCL setup below; duplicates elided ...]
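Note: the Flash Attention 2.0 warning elided above fires because each rank instantiates the model on CPU before DeepSpeed moves it to its GPU, so it is typically benign in this setup. Loading the LLM directly per the warning's own suggestion silences it; a minimal standalone reproduction of the fix:

    import torch
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen2.5-0.5B",
        attn_implementation="flash_attention_2",
        torch_dtype=torch.bfloat16,  # flash-attn kernels require fp16/bf16
    ).to("cuda")  # move to GPU after CPU init, as the warning suggests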
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test2-worker-0:587480:587480 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587480:587480 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587480:587480 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:587480:587480 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:587480:587480 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:587480:587480 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test2-worker-0:587485:587485 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:587485:587485 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587485:587485 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587485:587485 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:587485:587485 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:587485:587485 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:587482:587482 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:587482:587482 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587482:587482 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587481:587481 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:587481:587481 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587481:587481 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587482:587482 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:587482:587482 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:587482:587482 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:587481:587481 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:587481:587481 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:587481:587481 [1] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test2-worker-0:587484:587484 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:587484:587484 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587484:587484 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587484:587484 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:587484:587484 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:587484:587484 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:587486:587486 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:587486:587486 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587486:587486 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587486:587486 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:587486:587486 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:587486:587486 [6] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:587487:587487 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:587487:587487 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587487:587487 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587487:587487 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:587487:587487 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:587487:587487 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:587483:587483 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:587483:587483 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587483:587483 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587483:587483 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:587483:587483 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:587483:587483 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO NET/IB : No device found. 
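[Annotation] Each rank logs `NCCL_SOCKET_IFNAME set by environment to eth` and `NET/IB : No device found`, so NCCL bootstraps over eth0 and falls back to its socket transport for this single-node run. A minimal sketch of how such an environment is typically configured before process-group init; the exact interface name and debug level here are illustrative, not taken from this run's launcher:

```python
import os
import torch.distributed as dist

# Pin NCCL's bootstrap/data interface, matching the
# "NCCL_SOCKET_IFNAME set by environment to eth" lines above;
# with no InfiniBand devices, NCCL uses its socket transport.
os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")
os.environ.setdefault("NCCL_DEBUG", "INFO")  # source of the NCCL INFO lines in this log

# RANK / WORLD_SIZE / MASTER_ADDR / MASTER_PORT are supplied by the
# launcher (here, the DeepSpeed launcher on 127.0.0.1:29501).
dist.init_process_group(backend="nccl")
```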
+ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO ncclCommInitRank comm 0x55ad0c692f40 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x9336e4756d610082 - Init START +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO ncclCommInitRank comm 0x557ebf383900 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x9336e4756d610082 - Init START +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO ncclCommInitRank comm 0x55ac57b49470 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x9336e4756d610082 - Init START +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO ncclCommInitRank comm 0x5582eaa8a9e0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x9336e4756d610082 - Init START +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO ncclCommInitRank comm 0x55e786b00000 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x9336e4756d610082 - Init START +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO ncclCommInitRank comm 0x55e1b2028b80 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x9336e4756d610082 - Init START +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO ncclCommInitRank comm 0x56320e0f8440 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x9336e4756d610082 - Init START +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO ncclCommInitRank comm 0x5574655f0cb0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x9336e4756d610082 - Init START +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO NVLS 
multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO comm 0x55ad0c692f40 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO comm 0x5582eaa8a9e0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO comm 0x56320e0f8440 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO comm 0x5574655f0cb0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO comm 0x55e1b2028b80 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO comm 0x55e786b00000 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO comm 0x557ebf383900 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO comm 0x55ac57b49470 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 
[13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 
6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO 
Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 08/0 : 
3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
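[Annotation] The `Channel NN/0 : a[a] -> b[b] via P2P/CUMEM/read` lines above indicate direct GPU-to-GPU transport (CUDA peer access via cuMem) rather than staging through host memory. A quick, hedged way to confirm peer access between two local devices with a plain PyTorch query (not part of the training script itself):

```python
import torch

# True when device 0 can read device 1's memory directly, which is the
# capability the "via P2P/CUMEM/read" channels above rely on.
if torch.cuda.device_count() >= 2:
    print(torch.cuda.can_device_access_peer(0, 1))
```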
+ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
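[Annotation] Once every rank reports `Connected all rings`, collectives can flow over the 24 ring channels built above (0 -> 1 -> ... -> 7 -> 0). A minimal smoke test that would exercise them, assuming the process group is already initialized and, as in this single-node log, global rank equals local GPU index:

```python
import torch
import torch.distributed as dist

# Each of the 8 ranks contributes its own rank id; after the ring
# all-reduce every rank holds 0 + 1 + ... + 7 = 28.
rank = dist.get_rank()
x = torch.tensor([float(rank)], device=f"cuda:{rank}")
dist.all_reduce(x, op=dist.ReduceOp.SUM)
assert x.item() == sum(range(dist.get_world_size()))
```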
+ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO 
Connected all trees +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:587487:589086 [7] NCCL INFO ncclCommInitRank comm 0x55e1b2028b80 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x9336e4756d610082 - Init COMPLETE +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:587485:589081 [5] NCCL INFO ncclCommInitRank comm 0x557ebf383900 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x9336e4756d610082 - Init COMPLETE +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:587481:589083 [1] NCCL INFO ncclCommInitRank comm 0x5574655f0cb0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x9336e4756d610082 - Init COMPLETE +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:587483:589087 [3] NCCL INFO ncclCommInitRank comm 0x55ad0c692f40 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x9336e4756d610082 - Init COMPLETE +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:587484:589084 [4] NCCL INFO ncclCommInitRank comm 0x5582eaa8a9e0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x9336e4756d610082 - Init COMPLETE +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
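[Annotation] All eight communicators report `Init COMPLETE` just below, and ZeRO-3 finishes partitioning the 0.99B-parameter model. The subsequent "newly initialized" warning is expected for this masktune run: every attention and MLP projection gains a fresh `scores` tensor (e.g. `model.layers.0.mlp.down_proj.scores`) that does not exist in the pretrained checkpoint. The actual TinyLLaVA masking code is not shown in this log, so the following is only a hedged sketch of how such per-projection score parameters could be attached; the class name, shapes, and soft-mask form are assumptions (the 1.0 init and 0.3 temperature echo the `--init_mean_text 1.0` and `--temperature_*_text 0.3` flags in the launch command):

```python
import torch
import torch.nn as nn

class MaskedLinear(nn.Linear):
    """Hypothetical sketch: a linear layer with a learnable `scores`
    tensor, one score per weight, as the `*.scores` names suggest."""

    def __init__(self, in_features, out_features,
                 init_mean=1.0, temperature=0.3, **kw):
        super().__init__(in_features, out_features, **kw)
        # Created fresh here, hence "newly initialized" relative to
        # the pretrained checkpoint in the warning below.
        self.scores = nn.Parameter(
            torch.full((out_features, in_features), init_mean))
        self.temperature = temperature

    def forward(self, x):
        # Soft mask (cf. --mask_type_text soft): squash scores into
        # (0, 1) and gate each weight; the real recipe may differ.
        mask = torch.sigmoid(self.scores / self.temperature)
        return nn.functional.linear(x, self.weight * mask, self.bias)
```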
+ywang29-vrdb-test2-worker-0:587480:589080 [0] NCCL INFO ncclCommInitRank comm 0x55e786b00000 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x9336e4756d610082 - Init COMPLETE +ywang29-vrdb-test2-worker-0:587482:589082 [2] NCCL INFO ncclCommInitRank comm 0x56320e0f8440 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x9336e4756d610082 - Init COMPLETE +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:587486:589085 [6] NCCL INFO ncclCommInitRank comm 0x55ac57b49470 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x9336e4756d610082 - Init COMPLETE +[2025-10-10 06:43:25,909] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 
'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 
'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 
'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 
'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 
'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 
'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 
'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 
'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 
'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 
'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 06:51:09,281] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... + +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
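The long "newly initialized" warnings above are expected for this run: every attention and MLP projection in the language model (and both connector layers) is wrapped in a masked linear layer that carries an extra `scores` tensor, and those tensors do not exist in the pretrained checkpoint, so Transformers initializes them fresh; they are exactly the parameters this mask-tuning run trains. A minimal sketch of such a layer, assuming a temperature-scaled soft sigmoid mask as the class name `SupermaskLinearSparsity_SoftForward_Normal` in the model dump below suggests; this is illustrative, not the repository's actual implementation:

```python
# Illustrative sketch only -- the real class in this log is
# SupermaskLinearSparsity_SoftForward_Normal; its exact forward rule is an
# assumption here (soft sigmoid mask at temperature 0.3, scores init near 1.0,
# matching the Mean=1.000000 values in the "Pre-training init" lines below).
import torch
import torch.nn as nn
import torch.nn.functional as F

class SupermaskLinearSketch(nn.Linear):
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # Extra per-weight parameter: this is the `.scores` tensor the
        # checkpoint loader warns about (e.g. an 896x896 q_proj contributes
        # 802816 score entries, as in the trainable-parameter listing below).
        self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))
        self.temperature = temperature
        # Base weights stay frozen; only the scores are trained.
        self.weight.requires_grad = False
        if self.bias is not None:
            self.bias.requires_grad = False

    def forward(self, x):
        # "Soft forward": gate the frozen weight elementwise with a
        # temperature-scaled sigmoid of the scores.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

Because the base weights are frozen under this scheme, the later "Trainable Parameters" listing contains only `.scores` entries.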
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000 +Pre-training 
init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000 +Pre-training init 
connector._connector.0.scores: Mean=1.000005 +Pre-training init connector._connector.2.scores: Mean=0.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-10 06:51:26,905 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-10 06:51:26,912 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 
7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 
[2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+[... ~280 similar NCCL INFO lines elided: ranks 0-7 on ywang29-vrdb-test2-worker-0 connect channels 00/0-23/0 to their ring and tree neighbors via P2P/CUMEM/read; every rank then reports "Connected all rings", "Connected all trees", "threadThresholds 8/8/64 | 64/8/64 | 512 | 512", and "24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer" ...]
+ywang29-vrdb-test2-worker-0:587482:594346 [2] NCCL INFO ncclCommInitRank comm 0x7f239806ad80 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xe358ef7fbde450e7 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:587486:594348 [6] NCCL INFO ncclCommInitRank comm 0x7f391806b590 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xe358ef7fbde450e7 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:587484:594343 [4] NCCL INFO ncclCommInitRank comm 0x7f5d5c06a450 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xe358ef7fbde450e7 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:587480:594342 [0] NCCL INFO ncclCommInitRank comm 0x7f30c806ae20 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xe358ef7fbde450e7 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:587485:594345 [5] NCCL INFO ncclCommInitRank comm 0x7f555c06b5c0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xe358ef7fbde450e7 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:587481:594344 [1] NCCL INFO ncclCommInitRank comm 0x7fb88006a940 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xe358ef7fbde450e7 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:587487:594349 [7] NCCL INFO ncclCommInitRank comm 0x7efea406a5d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xe358ef7fbde450e7 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:587483:594347 [3] NCCL INFO ncclCommInitRank comm 0x7fa0d806b010 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xe358ef7fbde450e7 - Init COMPLETE
+ 0%| | 1/520 [00:14<2:02:10, 14.12s/it] {'loss': 2.283, 'grad_norm': 0.13519810664063012, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:09:33, 8.06s/it] {'loss': 2.2362, 'grad_norm': 0.14037087340803536, 'learning_rate': 0.025, 'epoch': 0.0}
+ 1%| | 3/520 [00:21<52:40, 6.11s/it] {'loss': 1.876, 'grad_norm': 0.08978560354832714, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 4/520 [00:25<45:00, 5.23s/it] {'loss': 1.8923, 'grad_norm': 0.08125260076552325, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 5/520 [00:29<40:37, 4.73s/it] {'loss': 1.8957, 'grad_norm': 0.05669671716299449, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 6/520 [00:33<37:53, 4.42s/it] {'loss': 1.7169, 'grad_norm': 0.03718602359394781, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:37<36:05, 4.22s/it] {'loss': 1.6462, 'grad_norm': 0.02074196792714872, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:41<36:37, 4.29s/it] {'loss': 1.6553, 'grad_norm': 0.0177412016740908, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:45<36:54, 4.33s/it] {'loss': 1.7176, 'grad_norm': 0.015892382620567524, 'learning_rate': 0.1125, 'epoch':
0.02} + 2%|▏ | 9/520 [00:45<36:54, 4.33s/it] 2%|▏ | 10/520 [00:49<35:26, 4.17s/it] {'loss': 1.5284, 'grad_norm': 0.015618742453322215, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:49<35:26, 4.17s/it] 2%|▏ | 11/520 [00:53<34:46, 4.10s/it] {'loss': 1.6389, 'grad_norm': 0.015865378180497415, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:53<34:46, 4.10s/it] 2%|▏ | 12/520 [00:57<34:00, 4.02s/it] {'loss': 1.6752, 'grad_norm': 0.022839440683566452, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:57<34:00, 4.02s/it][2025-10-10 06:52:33,613] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:02<35:09, 4.16s/it] {'loss': 1.6875, 'grad_norm': 0.023112603153150953, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:02<35:09, 4.16s/it] 3%|▎ | 14/520 [01:05<34:14, 4.06s/it] {'loss': 1.759, 'grad_norm': 0.017734645045499244, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:05<34:14, 4.06s/it] 3%|▎ | 15/520 [01:09<33:32, 3.98s/it] {'loss': 1.835, 'grad_norm': 0.023409785871930395, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:09<33:32, 3.98s/it] 3%|▎ | 16/520 [01:13<33:00, 3.93s/it] {'loss': 1.7494, 'grad_norm': 0.018973514907294115, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:13<33:00, 3.93s/it] 3%|▎ | 17/520 [01:17<32:41, 3.90s/it] {'loss': 1.8553, 'grad_norm': 0.01582679357070885, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:17<32:41, 3.90s/it] 3%|▎ | 18/520 [01:21<32:25, 3.88s/it] {'loss': 1.6612, 'grad_norm': 0.011242610298246823, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:21<32:25, 3.88s/it] 4%|▎ | 19/520 [01:24<32:20, 3.87s/it] {'loss': 1.9158, 'grad_norm': 0.019085218997287774, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:24<32:20, 3.87s/it] 4%|▍ | 20/520 [01:28<32:08, 3.86s/it] {'loss': 1.7301, 'grad_norm': 0.017011950740149863, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:28<32:08, 3.86s/it] 4%|▍ | 21/520 [01:32<32:03, 3.85s/it] {'loss': 2.0098, 'grad_norm': 0.02083039611939987, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:32<32:03, 3.85s/it] 4%|▍ | 22/520 [01:36<31:43, 3.82s/it] {'loss': 1.9521, 'grad_norm': 0.03188343560151873, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:36<31:43, 3.82s/it] 4%|▍ | 23/520 [01:40<31:29, 3.80s/it] {'loss': 1.8613, 'grad_norm': 0.01968271715241993, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:40<31:29, 3.80s/it] 5%|▍ | 24/520 [01:43<31:07, 3.76s/it] {'loss': 1.9896, 'grad_norm': 0.02122963577060551, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:43<31:07, 3.76s/it] 5%|▍ | 25/520 [01:47<30:48, 3.74s/it] {'loss': 1.8586, 'grad_norm': 0.014017755933874741, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} + 5%|▍ | 25/520 [01:47<30:48, 3.74s/it] 5%|▌ | 26/520 [01:51<30:43, 3.73s/it] {'loss': 1.9124, 'grad_norm': 0.016463966737521964, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:51<30:43, 3.73s/it] 5%|▌ 
| 27/520 [01:54<30:30, 3.71s/it] {'loss': 1.7602, 'grad_norm': 0.013325034413809738, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:54<30:30, 3.71s/it] 5%|▌ | 28/520 [01:58<30:19, 3.70s/it] {'loss': 1.7188, 'grad_norm': 0.010469320543079643, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:58<30:19, 3.70s/it] 6%|▌ | 29/520 [02:02<30:13, 3.69s/it] {'loss': 1.7274, 'grad_norm': 0.009732747457572034, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [02:02<30:13, 3.69s/it] 6%|▌ | 30/520 [02:05<30:10, 3.69s/it] {'loss': 2.3126, 'grad_norm': 0.038350433729357265, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:05<30:10, 3.69s/it] 6%|▌ | 31/520 [02:09<30:02, 3.69s/it] {'loss': 1.7829, 'grad_norm': 0.014310728059358378, 'learning_rate': 0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:09<30:02, 3.69s/it] 6%|▌ | 32/520 [02:13<29:59, 3.69s/it] {'loss': 2.6484, 'grad_norm': 0.03642326405212324, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:13<29:59, 3.69s/it] 6%|▋ | 33/520 [02:16<29:54, 3.69s/it] {'loss': 1.8392, 'grad_norm': 0.016736559727666134, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:16<29:54, 3.69s/it] 7%|▋ | 34/520 [02:20<29:53, 3.69s/it] {'loss': 1.8067, 'grad_norm': 0.02356613868185463, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:20<29:53, 3.69s/it] 7%|▋ | 35/520 [02:24<29:50, 3.69s/it] {'loss': 1.776, 'grad_norm': 0.016166763437382294, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:24<29:50, 3.69s/it] 7%|▋ | 36/520 [02:28<29:50, 3.70s/it] {'loss': 1.9751, 'grad_norm': 0.024650175753764447, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:28<29:50, 3.70s/it] 7%|▋ | 37/520 [02:31<29:45, 3.70s/it] {'loss': 2.5034, 'grad_norm': 0.0271101189223167, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:31<29:45, 3.70s/it] 7%|▋ | 38/520 [02:35<29:46, 3.71s/it] {'loss': 2.1198, 'grad_norm': 0.028827091270789616, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:35<29:46, 3.71s/it] 8%|▊ | 39/520 [02:39<29:32, 3.68s/it] {'loss': 2.0944, 'grad_norm': 0.05155769019518617, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:39<29:32, 3.68s/it] 8%|▊ | 40/520 [02:42<29:26, 3.68s/it] {'loss': 2.0904, 'grad_norm': 0.02712221780942725, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:42<29:26, 3.68s/it] 8%|▊ | 41/520 [02:46<29:26, 3.69s/it] {'loss': 1.8987, 'grad_norm': 0.01476997738549952, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:46<29:26, 3.69s/it] 8%|▊ | 42/520 [02:50<29:20, 3.68s/it] {'loss': 1.9341, 'grad_norm': 0.018502312798977065, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:50<29:20, 3.68s/it] 8%|▊ | 43/520 [02:53<29:24, 3.70s/it] {'loss': 2.2207, 'grad_norm': 0.015525106556859476, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:53<29:24, 3.70s/it] 8%|▊ | 44/520 [02:57<29:19, 3.70s/it] {'loss': 2.3563, 'grad_norm': 0.019121787742239507, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:57<29:19, 3.70s/it] 9%|▊ | 45/520 [03:01<29:14, 3.69s/it] {'loss': 1.8488, 'grad_norm': 0.01198128998553528, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [03:01<29:14, 3.69s/it] 9%|▉ | 46/520 [03:04<29:07, 3.69s/it] {'loss': 2.4915, 'grad_norm': 0.023403465712061963, 
'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:04<29:07, 3.69s/it] 9%|▉ | 47/520 [03:08<28:53, 3.67s/it] {'loss': 1.9088, 'grad_norm': 0.017719212522676407, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:08<28:53, 3.67s/it] 9%|▉ | 48/520 [03:12<28:47, 3.66s/it] {'loss': 1.862, 'grad_norm': 0.01125387743565618, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:12<28:47, 3.66s/it] 9%|▉ | 49/520 [03:15<28:44, 3.66s/it] {'loss': 1.8625, 'grad_norm': 0.00955127217392494, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:15<28:44, 3.66s/it] 10%|▉ | 50/520 [03:19<28:38, 3.66s/it] {'loss': 1.8362, 'grad_norm': 0.010200047795584668, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:19<28:38, 3.66s/it] 10%|▉ | 51/520 [03:23<28:28, 3.64s/it] {'loss': 1.7337, 'grad_norm': 0.009106458478236601, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:23<28:28, 3.64s/it] 10%|█ | 52/520 [03:26<28:29, 3.65s/it] {'loss': 1.9219, 'grad_norm': 0.00987325731398594, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:26<28:29, 3.65s/it] 10%|█ | 53/520 [03:30<28:22, 3.65s/it] {'loss': 1.917, 'grad_norm': 0.008552261801919318, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:30<28:22, 3.65s/it] 10%|█ | 54/520 [03:34<28:20, 3.65s/it] {'loss': 1.7461, 'grad_norm': 0.008594050425954283, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:34<28:20, 3.65s/it] 11%|█ | 55/520 [03:37<28:18, 3.65s/it] {'loss': 1.7538, 'grad_norm': 0.007706694130974496, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<28:18, 3.65s/it] 11%|█ | 56/520 [03:41<28:10, 3.64s/it] {'loss': 1.925, 'grad_norm': 0.009274165758929993, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:41<28:10, 3.64s/it] 11%|█ | 57/520 [03:45<28:01, 3.63s/it] {'loss': 1.743, 'grad_norm': 0.00755518119777752, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:45<28:01, 3.63s/it] 11%|█ | 58/520 [03:48<27:53, 3.62s/it] {'loss': 1.8889, 'grad_norm': 0.006569094741107495, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:48<27:53, 3.62s/it] 11%|█▏ | 59/520 [03:52<27:49, 3.62s/it] {'loss': 2.1028, 'grad_norm': 0.012582188287365193, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:52<27:49, 3.62s/it] 12%|█▏ | 60/520 [03:55<27:45, 3.62s/it] {'loss': 1.8718, 'grad_norm': 0.010077405016328025, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:55<27:45, 3.62s/it] 12%|█▏ | 61/520 [03:59<27:58, 3.66s/it] {'loss': 2.1909, 'grad_norm': 0.010382350425290565, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:59<27:58, 3.66s/it] 12%|█▏ | 62/520 [04:03<28:14, 3.70s/it] {'loss': 1.7835, 'grad_norm': 0.007093530926411544, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:03<28:14, 3.70s/it] 12%|█▏ | 63/520 [04:07<28:26, 3.73s/it] {'loss': 1.7909, 'grad_norm': 0.0067993171011957, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:07<28:26, 3.73s/it] 12%|█▏ | 64/520 [04:11<28:38, 3.77s/it] {'loss': 1.7832, 'grad_norm': 0.006500608943091707, 'learning_rate': 0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:11<28:38, 3.77s/it] 12%|█▎ | 65/520 [04:14<28:39, 3.78s/it] {'loss': 1.7804, 'grad_norm': 0.007277544186507437, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} 
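The learning_rate column above is consistent with the run's cosine scheduler with 3% warmup: linear warmup over the first 16 of 520 steps up to the 2e-1 peak, then half-cosine decay. A minimal sketch that reproduces the logged values under that assumption (the constants below are read off this log, not taken from the training code):

```python
import math

# Schedule constants inferred from the log above (assumptions, not source code).
PEAK_LR = 0.2        # peak learning rate 2e-1
TOTAL_STEPS = 520    # the ".../520" in the progress bars
WARMUP_STEPS = 16    # lr ramps 0.0125 -> 0.2 over steps 1-16 (~0.03 * 520)

def lr_at(step: int) -> float:
    """Linear warmup followed by half-cosine decay toward zero."""
    if step <= WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

# Spot checks against logged 'learning_rate' values:
assert abs(lr_at(16) - 0.2) < 1e-12                  # step 16: 0.2
assert abs(lr_at(17) - 0.1999980572931538) < 1e-9    # step 17
assert abs(lr_at(44) - 0.19848077530122082) < 1e-9   # step 44
```

The same closed form also matches the tail of the decay, e.g. step 100's logged 0.1866025403784439 equals 0.2 * 0.5 * (1 + cos(pi * 84/504)).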
+ 12%|█▎ | 65/520 [04:14<28:39, 3.78s/it] 13%|█▎ | 66/520 [04:18<28:35, 3.78s/it] {'loss': 1.7552, 'grad_norm': 0.00556492515939543, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:18<28:35, 3.78s/it] 13%|█▎ | 67/520 [04:22<28:31, 3.78s/it] {'loss': 1.5816, 'grad_norm': 0.00632969020944362, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:22<28:31, 3.78s/it] 13%|█▎ | 68/520 [04:26<28:33, 3.79s/it] {'loss': 1.6005, 'grad_norm': 0.005560513704265318, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:26<28:33, 3.79s/it] 13%|█▎ | 69/520 [04:30<28:32, 3.80s/it] {'loss': 1.5971, 'grad_norm': 0.006886269769336037, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:30<28:32, 3.80s/it] 13%|█▎ | 70/520 [04:33<28:34, 3.81s/it] {'loss': 1.6902, 'grad_norm': 0.006775914122866342, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:33<28:34, 3.81s/it] 14%|█▎ | 71/520 [04:37<28:28, 3.80s/it] {'loss': 1.594, 'grad_norm': 0.006864274886728539, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:37<28:28, 3.80s/it] 14%|█▍ | 72/520 [04:41<28:24, 3.80s/it] {'loss': 1.7216, 'grad_norm': 0.0060293993901256665, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:41<28:24, 3.80s/it] 14%|█▍ | 73/520 [04:45<28:20, 3.80s/it] {'loss': 1.5321, 'grad_norm': 0.0062184617645312445, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:45<28:20, 3.80s/it] 14%|█▍ | 74/520 [04:49<28:21, 3.81s/it] {'loss': 1.6925, 'grad_norm': 0.006679759222983939, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:49<28:21, 3.81s/it] 14%|█▍ | 75/520 [04:52<28:18, 3.82s/it] {'loss': 1.5406, 'grad_norm': 0.005730671862689429, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:52<28:18, 3.82s/it] 15%|█▍ | 76/520 [04:56<28:13, 3.81s/it] {'loss': 2.2266, 'grad_norm': 0.01244419805928863, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:56<28:13, 3.81s/it] 15%|█▍ | 77/520 [05:00<28:04, 3.80s/it] {'loss': 1.494, 'grad_norm': 0.007074196285916053, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:00<28:04, 3.80s/it] 15%|█▌ | 78/520 [05:04<28:04, 3.81s/it] {'loss': 1.6371, 'grad_norm': 0.005859488723801357, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:04<28:04, 3.81s/it] 15%|█▌ | 79/520 [05:08<27:57, 3.80s/it] {'loss': 1.6108, 'grad_norm': 0.0052689036798177135, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:08<27:57, 3.80s/it] 15%|█▌ | 80/520 [05:11<27:57, 3.81s/it] {'loss': 2.2339, 'grad_norm': 0.009241635321986344, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:11<27:57, 3.81s/it] 16%|█▌ | 81/520 [05:15<27:51, 3.81s/it] {'loss': 1.8054, 'grad_norm': 0.0071422163084840715, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:15<27:51, 3.81s/it] 16%|█▌ | 82/520 [05:19<27:48, 3.81s/it] {'loss': 1.6984, 'grad_norm': 0.0061752351464160644, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:19<27:48, 3.81s/it] 16%|█▌ | 83/520 [05:23<27:40, 3.80s/it] {'loss': 1.7295, 'grad_norm': 0.005796884937484929, 'learning_rate': 0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:23<27:40, 3.80s/it] 16%|█▌ | 84/520 [05:27<27:42, 3.81s/it] {'loss': 1.718, 'grad_norm': 0.006801002547227429, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 
16%|█▌ | 84/520 [05:27<27:42, 3.81s/it] 16%|█▋ | 85/520 [05:31<27:37, 3.81s/it] {'loss': 1.696, 'grad_norm': 0.005883772611973862, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:31<27:37, 3.81s/it] 17%|█▋ | 86/520 [05:34<27:37, 3.82s/it] {'loss': 1.7823, 'grad_norm': 0.006414201725842457, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:34<27:37, 3.82s/it] 17%|█▋ | 87/520 [05:38<27:27, 3.80s/it] {'loss': 2.0886, 'grad_norm': 0.006908362663571971, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:38<27:27, 3.80s/it] 17%|█▋ | 88/520 [05:42<27:26, 3.81s/it] {'loss': 2.2295, 'grad_norm': 0.008299193033660661, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:42<27:26, 3.81s/it] 17%|█▋ | 89/520 [05:46<27:21, 3.81s/it] {'loss': 1.6629, 'grad_norm': 0.005938157272399599, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:46<27:21, 3.81s/it] 17%|█▋ | 90/520 [05:50<27:18, 3.81s/it] {'loss': 1.5822, 'grad_norm': 0.005335573821455053, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:50<27:18, 3.81s/it] 18%|█▊ | 91/520 [05:53<27:11, 3.80s/it] {'loss': 1.6887, 'grad_norm': 0.004789332192708502, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:53<27:11, 3.80s/it] 18%|█▊ | 92/520 [05:57<27:06, 3.80s/it] {'loss': 1.6147, 'grad_norm': 0.005339607451402481, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:57<27:06, 3.80s/it] 18%|█▊ | 93/520 [06:01<27:02, 3.80s/it] {'loss': 1.6309, 'grad_norm': 0.00828653387223282, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:01<27:02, 3.80s/it] 18%|█▊ | 94/520 [06:05<27:05, 3.82s/it] {'loss': 1.7453, 'grad_norm': 0.006792683030832454, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:05<27:05, 3.82s/it] 18%|█▊ | 95/520 [06:08<26:39, 3.76s/it] {'loss': 1.5826, 'grad_norm': 0.006242562969096488, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:08<26:39, 3.76s/it] 18%|█▊ | 96/520 [06:12<26:21, 3.73s/it] {'loss': 1.5785, 'grad_norm': 0.005527961431916764, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:12<26:21, 3.73s/it] 19%|█▊ | 97/520 [06:16<26:21, 3.74s/it] {'loss': 1.5494, 'grad_norm': 0.005926872566652679, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:16<26:21, 3.74s/it] 19%|█▉ | 98/520 [06:20<26:33, 3.78s/it] {'loss': 1.5457, 'grad_norm': 0.0047149083861068655, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:20<26:33, 3.78s/it] 19%|█▉ | 99/520 [06:24<26:42, 3.81s/it] {'loss': 1.6053, 'grad_norm': 0.005958561259899486, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:24<26:42, 3.81s/it] 19%|█▉ | 100/520 [06:27<26:43, 3.82s/it] {'loss': 1.877, 'grad_norm': 0.008705370876995114, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:27<26:43, 3.82s/it] 19%|█▉ | 101/520 [06:31<26:44, 3.83s/it] {'loss': 1.5746, 'grad_norm': 0.005336058649419358, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:31<26:44, 3.83s/it] 20%|█▉ | 102/520 [06:35<26:47, 3.85s/it] {'loss': 1.581, 'grad_norm': 0.00565188535214726, 'learning_rate': 0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:35<26:47, 3.85s/it] 20%|█▉ | 103/520 [06:39<26:44, 3.85s/it] {'loss': 1.4962, 'grad_norm': 0.004533007455430889, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 
20%|█▉ | 103/520 [06:39<26:44, 3.85s/it] 20%|██ | 104/520 [06:43<26:42, 3.85s/it] {'loss': 1.5843, 'grad_norm': 0.005339186524004597, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:43<26:42, 3.85s/it] 20%|██ | 105/520 [06:47<26:40, 3.86s/it] {'loss': 1.5768, 'grad_norm': 0.004159355673177194, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:47<26:40, 3.86s/it] 20%|██ | 106/520 [06:51<26:36, 3.86s/it] {'loss': 1.8377, 'grad_norm': 0.006083729627596242, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:51<26:36, 3.86s/it] 21%|██ | 107/520 [06:54<26:32, 3.86s/it] {'loss': 1.8158, 'grad_norm': 0.006254165727250835, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:54<26:32, 3.86s/it] 21%|██ | 108/520 [06:58<26:30, 3.86s/it] {'loss': 1.536, 'grad_norm': 0.005841082231040518, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:58<26:30, 3.86s/it] 21%|██ | 109/520 [07:02<26:28, 3.87s/it] {'loss': 1.8024, 'grad_norm': 0.005013853576053513, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [07:02<26:28, 3.87s/it] 21%|██ | 110/520 [07:06<26:28, 3.87s/it] {'loss': 1.7267, 'grad_norm': 0.005093337521097579, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:06<26:28, 3.87s/it] 21%|██▏ | 111/520 [07:10<26:26, 3.88s/it] {'loss': 1.7354, 'grad_norm': 0.0058748246531064845, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:10<26:26, 3.88s/it] 22%|██▏ | 112/520 [07:14<26:18, 3.87s/it] {'loss': 1.5993, 'grad_norm': 0.004864640437177439, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:14<26:18, 3.87s/it] 22%|██▏ | 113/520 [07:18<26:12, 3.86s/it] {'loss': 1.448, 'grad_norm': 0.004382256558001323, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:18<26:12, 3.86s/it] 22%|██▏ | 114/520 [07:22<26:07, 3.86s/it] {'loss': 1.5657, 'grad_norm': 0.004867131636591133, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:22<26:07, 3.86s/it] 22%|██▏ | 115/520 [07:25<25:52, 3.83s/it] {'loss': 1.7175, 'grad_norm': 0.004574284386509222, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:25<25:52, 3.83s/it] 22%|██▏ | 116/520 [07:29<25:23, 3.77s/it] {'loss': 1.6864, 'grad_norm': 0.00436662839024576, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:29<25:23, 3.77s/it] 22%|██▎ | 117/520 [07:33<25:07, 3.74s/it] {'loss': 1.6853, 'grad_norm': 0.004955914266889431, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:33<25:07, 3.74s/it] 23%|██▎ | 118/520 [07:36<24:56, 3.72s/it] {'loss': 1.5218, 'grad_norm': 0.004154035027885038, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:36<24:56, 3.72s/it] 23%|██▎ | 119/520 [07:40<24:39, 3.69s/it] {'loss': 1.4815, 'grad_norm': 0.004662538935753702, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:40<24:39, 3.69s/it] 23%|██▎ | 120/520 [07:44<24:32, 3.68s/it] {'loss': 1.4915, 'grad_norm': 0.0052179965689076015, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:44<24:32, 3.68s/it] 23%|██▎ | 121/520 [07:47<24:26, 3.68s/it] {'loss': 1.5556, 'grad_norm': 0.004873408157140255, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:47<24:26, 3.68s/it] 23%|██▎ | 122/520 [07:51<24:20, 3.67s/it] {'loss': 1.4367, 'grad_norm': 0.004151164838504161, 
'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:51<24:20, 3.67s/it] 24%|██▎ | 123/520 [07:55<24:16, 3.67s/it] {'loss': 1.8555, 'grad_norm': 0.005813269159851129, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:55<24:16, 3.67s/it] 24%|██▍ | 124/520 [07:58<24:06, 3.65s/it] {'loss': 1.5527, 'grad_norm': 0.0049811914546026535, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:58<24:06, 3.65s/it] 24%|██▍ | 125/520 [08:02<24:04, 3.66s/it] {'loss': 1.5192, 'grad_norm': 0.00453121776610244, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:02<24:04, 3.66s/it] 24%|██▍ | 126/520 [08:06<25:21, 3.86s/it] {'loss': 1.7093, 'grad_norm': 0.004374672705884683, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:06<25:21, 3.86s/it] 24%|██▍ | 127/520 [08:10<24:51, 3.80s/it] {'loss': 1.4878, 'grad_norm': 0.0054181193300071855, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:10<24:51, 3.80s/it] 25%|██▍ | 128/520 [08:13<24:32, 3.76s/it] {'loss': 1.5573, 'grad_norm': 0.0045059435504011145, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:13<24:32, 3.76s/it] 25%|██▍ | 129/520 [08:17<24:15, 3.72s/it] {'loss': 1.4595, 'grad_norm': 0.00402340750723688, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:17<24:15, 3.72s/it] 25%|██▌ | 130/520 [08:21<24:02, 3.70s/it] {'loss': 1.563, 'grad_norm': 0.004644935380072097, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:21<24:02, 3.70s/it] 25%|██▌ | 131/520 [08:24<23:56, 3.69s/it] {'loss': 1.6855, 'grad_norm': 0.0050018680772269605, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:24<23:56, 3.69s/it] 25%|██▌ | 132/520 [08:28<23:45, 3.67s/it] {'loss': 1.5928, 'grad_norm': 0.005697827103622193, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:28<23:45, 3.67s/it] 26%|██▌ | 133/520 [08:32<23:35, 3.66s/it] {'loss': 1.473, 'grad_norm': 0.0045251510173895705, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:32<23:35, 3.66s/it] 26%|██▌ | 134/520 [08:35<23:32, 3.66s/it] {'loss': 1.5681, 'grad_norm': 0.00418127108658868, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:35<23:32, 3.66s/it] 26%|██▌ | 135/520 [08:39<23:23, 3.64s/it] {'loss': 1.6351, 'grad_norm': 0.005224500377594337, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:39<23:23, 3.64s/it] 26%|██▌ | 136/520 [08:43<23:16, 3.64s/it] {'loss': 1.5523, 'grad_norm': 0.0041415039004027866, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:43<23:16, 3.64s/it] 26%|██▋ | 137/520 [08:46<23:11, 3.63s/it] {'loss': 1.4819, 'grad_norm': 0.005925524257227618, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:46<23:11, 3.63s/it] 27%|██▋ | 138/520 [08:50<23:04, 3.62s/it] {'loss': 1.4808, 'grad_norm': 0.004849239991827179, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:50<23:04, 3.62s/it] 27%|██▋ | 139/520 [08:53<23:03, 3.63s/it] {'loss': 1.5646, 'grad_norm': 0.005434802940120918, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:53<23:03, 3.63s/it] 27%|██▋ | 140/520 [08:57<23:01, 3.63s/it] {'loss': 1.7271, 'grad_norm': 0.0062537960561935705, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:57<23:01, 3.63s/it] 27%|██▋ | 141/520 
[09:01<22:57, 3.63s/it] {'loss': 1.6168, 'grad_norm': 0.006122894792115431, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:01<22:57, 3.63s/it] 27%|██▋ | 142/520 [09:04<22:57, 3.64s/it] {'loss': 1.7897, 'grad_norm': 0.005832960169780689, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:04<22:57, 3.64s/it] 28%|██▊ | 143/520 [09:08<22:51, 3.64s/it] {'loss': 1.5367, 'grad_norm': 0.007852717077387071, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:08<22:51, 3.64s/it] 28%|██▊ | 144/520 [09:12<22:45, 3.63s/it] {'loss': 1.4457, 'grad_norm': 0.00487348474272633, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:12<22:45, 3.63s/it] 28%|██▊ | 145/520 [09:15<22:42, 3.63s/it] {'loss': 1.3887, 'grad_norm': 0.004721100574122141, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:15<22:42, 3.63s/it] 28%|██▊ | 146/520 [09:19<22:39, 3.64s/it] {'loss': 1.8153, 'grad_norm': 0.0064240743223304446, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:19<22:39, 3.64s/it] 28%|██▊ | 147/520 [09:23<22:38, 3.64s/it] {'loss': 1.4312, 'grad_norm': 0.00492094824081921, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:23<22:38, 3.64s/it] 28%|██▊ | 148/520 [09:26<22:36, 3.65s/it] {'loss': 1.476, 'grad_norm': 0.0043434616624140375, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:26<22:36, 3.65s/it] 29%|██▊ | 149/520 [09:30<22:34, 3.65s/it] {'loss': 1.4351, 'grad_norm': 0.006010043926870062, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:30<22:34, 3.65s/it] 29%|██▉ | 150/520 [09:34<22:27, 3.64s/it] {'loss': 1.6697, 'grad_norm': 0.005685042366554957, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:34<22:27, 3.64s/it] 29%|██▉ | 151/520 [09:37<22:24, 3.64s/it] {'loss': 1.4549, 'grad_norm': 0.004363515727737408, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:37<22:24, 3.64s/it] 29%|██▉ | 152/520 [09:41<22:18, 3.64s/it] {'loss': 1.4184, 'grad_norm': 0.006757051834866642, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:41<22:18, 3.64s/it] 29%|██▉ | 153/520 [09:44<22:15, 3.64s/it] {'loss': 1.4575, 'grad_norm': 0.005790328614232052, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:44<22:15, 3.64s/it] 30%|██▉ | 154/520 [09:48<22:09, 3.63s/it] {'loss': 1.549, 'grad_norm': 0.004338898803324724, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:48<22:09, 3.63s/it] 30%|██▉ | 155/520 [09:52<22:06, 3.64s/it] {'loss': 1.4343, 'grad_norm': 0.005650970403445355, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:52<22:06, 3.64s/it] 30%|███ | 156/520 [09:55<22:05, 3.64s/it] {'loss': 1.4969, 'grad_norm': 0.005078902885159368, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:55<22:05, 3.64s/it] 30%|███ | 157/520 [09:59<22:05, 3.65s/it] {'loss': 1.8118, 'grad_norm': 0.005047196051650763, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:59<22:05, 3.65s/it] 30%|███ | 158/520 [10:03<22:00, 3.65s/it] {'loss': 1.4646, 'grad_norm': 0.00618455271679003, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:03<22:00, 3.65s/it] 31%|███ | 159/520 [10:06<21:57, 3.65s/it] {'loss': 1.4763, 'grad_norm': 0.0045911374952438385, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} 
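The [WARNING] from stage3.py near step 12 flags PyTorch allocator cache flushes under memory pressure, and the message itself recommends synchronized get_accelerator().empty_cache() calls across ranks. A minimal sketch of that mitigation, assuming an already-initialized DeepSpeed engine and dataloader passed in by the caller (the names and the flush interval here are illustrative, not from the training code):

```python
from deepspeed.accelerator import get_accelerator

def train_with_synchronized_cache_flush(engine, loader, flush_every=100):
    """Training loop that flushes the CUDA caching allocator on every rank
    at the same step, as the stage3.py warning above suggests.
    `engine` is an initialized deepspeed engine whose forward returns the
    loss; `flush_every` is an arbitrary illustrative interval."""
    for step, batch in enumerate(loader):
        loss = engine(batch)
        engine.backward(loss)
        engine.step()
        if step % flush_every == 0:
            # Same step on all ranks, so the flushes stay synchronized.
            get_accelerator().empty_cache()
```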
+ 31%|███ | 159/520 [10:06<21:57, 3.65s/it] 31%|███ | 160/520 [10:10<21:51, 3.64s/it] {'loss': 1.5262, 'grad_norm': 0.004384799623449557, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:10<21:51, 3.64s/it] 31%|███ | 161/520 [10:14<21:47, 3.64s/it] {'loss': 1.51, 'grad_norm': 0.004958842200635707, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:14<21:47, 3.64s/it] 31%|███ | 162/520 [10:17<21:44, 3.64s/it] {'loss': 1.7049, 'grad_norm': 0.005030382287627356, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:17<21:44, 3.64s/it] 31%|███▏ | 163/520 [10:21<21:40, 3.64s/it] {'loss': 1.3639, 'grad_norm': 0.005096180623965769, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:21<21:40, 3.64s/it] 32%|███▏ | 164/520 [10:25<21:41, 3.66s/it] {'loss': 1.3293, 'grad_norm': 0.0044855548896646, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:25<21:41, 3.66s/it] 32%|███▏ | 165/520 [10:28<21:36, 3.65s/it] {'loss': 1.462, 'grad_norm': 0.004326873522683177, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:28<21:36, 3.65s/it] 32%|███▏ | 166/520 [10:32<21:29, 3.64s/it] {'loss': 1.4661, 'grad_norm': 0.004381640988489948, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:32<21:29, 3.64s/it] 32%|███▏ | 167/520 [10:36<21:30, 3.66s/it] {'loss': 1.4635, 'grad_norm': 0.004812952766834412, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:36<21:30, 3.66s/it] 32%|███▏ | 168/520 [10:39<21:26, 3.65s/it] {'loss': 1.4004, 'grad_norm': 0.0040118123353255685, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:39<21:26, 3.65s/it] 32%|███▎ | 169/520 [10:43<21:20, 3.65s/it] {'loss': 1.4821, 'grad_norm': 0.003679616995695422, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:43<21:20, 3.65s/it] 33%|███▎ | 170/520 [10:46<21:19, 3.66s/it] {'loss': 1.6013, 'grad_norm': 0.004347531755766048, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:46<21:19, 3.66s/it] 33%|███▎ | 171/520 [10:50<21:17, 3.66s/it] {'loss': 1.4094, 'grad_norm': 0.005005795416723377, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:50<21:17, 3.66s/it] 33%|███▎ | 172/520 [10:54<21:12, 3.66s/it] {'loss': 1.4741, 'grad_norm': 0.003973536557453113, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:54<21:12, 3.66s/it] 33%|███▎ | 173/520 [10:57<21:10, 3.66s/it] {'loss': 1.3989, 'grad_norm': 0.004203186455761019, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:57<21:10, 3.66s/it] 33%|███▎ | 174/520 [11:01<21:09, 3.67s/it] {'loss': 1.4775, 'grad_norm': 0.00521329048197246, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:01<21:09, 3.67s/it] 34%|███▎ | 175/520 [11:05<21:06, 3.67s/it] {'loss': 1.3845, 'grad_norm': 0.003977335023075121, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:05<21:06, 3.67s/it] 34%|███▍ | 176/520 [11:08<20:58, 3.66s/it] {'loss': 1.7107, 'grad_norm': 0.005044897462293952, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:08<20:58, 3.66s/it] 34%|███▍ | 177/520 [11:12<20:54, 3.66s/it] {'loss': 1.5592, 'grad_norm': 0.00549377596438909, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:12<20:54, 3.66s/it] 34%|███▍ | 178/520 [11:16<20:48, 3.65s/it] {'loss': 
1.4439, 'grad_norm': 0.004183468188199654, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:16<20:48, 3.65s/it] 34%|███▍ | 179/520 [11:19<20:44, 3.65s/it] {'loss': 1.5551, 'grad_norm': 0.004176137318582701, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:19<20:44, 3.65s/it] 35%|███▍ | 180/520 [11:23<20:42, 3.66s/it] {'loss': 1.437, 'grad_norm': 0.004446576567175777, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:23<20:42, 3.66s/it] 35%|███▍ | 181/520 [11:27<20:36, 3.65s/it] {'loss': 1.4169, 'grad_norm': 0.003908878559816318, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:27<20:36, 3.65s/it] 35%|███▌ | 182/520 [11:30<20:33, 3.65s/it] {'loss': 1.4294, 'grad_norm': 0.0037733185052040087, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:30<20:33, 3.65s/it] 35%|███▌ | 183/520 [11:34<20:29, 3.65s/it] {'loss': 1.4725, 'grad_norm': 0.004677678361527083, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:34<20:29, 3.65s/it] 35%|███▌ | 184/520 [11:38<20:28, 3.66s/it] {'loss': 1.3613, 'grad_norm': 0.003943904240151563, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:38<20:28, 3.66s/it] 36%|███▌ | 185/520 [11:41<20:27, 3.66s/it] {'loss': 1.555, 'grad_norm': 0.003943297249136887, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:41<20:27, 3.66s/it] 36%|███▌ | 186/520 [11:45<20:24, 3.67s/it] {'loss': 1.3797, 'grad_norm': 0.003785807553835311, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:45<20:24, 3.67s/it] 36%|███▌ | 187/520 [11:49<20:21, 3.67s/it] {'loss': 1.4035, 'grad_norm': 0.004281107912393216, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:49<20:21, 3.67s/it] 36%|███▌ | 188/520 [11:52<20:16, 3.66s/it] {'loss': 1.4715, 'grad_norm': 0.0043895549208002775, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:52<20:16, 3.66s/it] 36%|███▋ | 189/520 [11:56<20:15, 3.67s/it] {'loss': 1.4964, 'grad_norm': 0.0035589618110715183, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:56<20:15, 3.67s/it] 37%|███▋ | 190/520 [12:00<20:09, 3.66s/it] {'loss': 1.4084, 'grad_norm': 0.004261619773465735, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:00<20:09, 3.66s/it] 37%|███▋ | 191/520 [12:03<20:11, 3.68s/it] {'loss': 1.3717, 'grad_norm': 0.0038915082133338143, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:03<20:11, 3.68s/it] 37%|███▋ | 192/520 [12:07<20:11, 3.69s/it] {'loss': 1.4474, 'grad_norm': 0.0036033588383504187, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:07<20:11, 3.69s/it] 37%|███▋ | 193/520 [12:11<20:07, 3.69s/it] {'loss': 1.6406, 'grad_norm': 0.004519381791307769, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:11<20:07, 3.69s/it] 37%|███▋ | 194/520 [12:15<20:05, 3.70s/it] {'loss': 1.4825, 'grad_norm': 0.004164198052141543, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:15<20:05, 3.70s/it] 38%|███▊ | 195/520 [12:18<20:01, 3.70s/it] {'loss': 1.4618, 'grad_norm': 0.0036500029202778347, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:18<20:01, 3.70s/it] 38%|███▊ | 196/520 [12:22<19:54, 3.69s/it] {'loss': 1.4146, 'grad_norm': 0.003951735427141242, 'learning_rate': 0.14338837391175582, 
'epoch': 0.38} + 38%|███▊ | 196/520 [12:22<19:54, 3.69s/it] 38%|███▊ | 197/520 [12:26<19:52, 3.69s/it] {'loss': 1.3935, 'grad_norm': 0.004000600881273943, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:26<19:52, 3.69s/it] 38%|███▊ | 198/520 [12:29<19:48, 3.69s/it] {'loss': 1.4807, 'grad_norm': 0.004325309094810333, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:29<19:48, 3.69s/it] 38%|███▊ | 199/520 [12:33<19:44, 3.69s/it] {'loss': 1.3767, 'grad_norm': 0.0037703491938470596, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:33<19:44, 3.69s/it] 38%|███▊ | 200/520 [12:37<19:41, 3.69s/it] {'loss': 1.5324, 'grad_norm': 0.004208354379685943, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:37<19:41, 3.69s/it] 39%|███▊ | 201/520 [12:40<19:36, 3.69s/it] {'loss': 1.5381, 'grad_norm': 0.003605839895283626, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:40<19:36, 3.69s/it] 39%|███▉ | 202/520 [12:44<19:32, 3.69s/it] {'loss': 1.3633, 'grad_norm': 0.0037602824239472196, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:44<19:32, 3.69s/it] 39%|███▉ | 203/520 [12:48<19:27, 3.68s/it] {'loss': 1.4247, 'grad_norm': 0.0038701836625275724, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:48<19:27, 3.68s/it] 39%|███▉ | 204/520 [12:51<19:21, 3.68s/it] {'loss': 1.464, 'grad_norm': 0.004472443288145208, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:51<19:21, 3.68s/it] 39%|███▉ | 205/520 [12:55<19:23, 3.69s/it] {'loss': 1.571, 'grad_norm': 0.004077548866692373, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:55<19:23, 3.69s/it] 40%|███▉ | 206/520 [12:59<19:18, 3.69s/it] {'loss': 1.5123, 'grad_norm': 0.003766354631894851, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:59<19:18, 3.69s/it] 40%|███▉ | 207/520 [13:02<19:15, 3.69s/it] {'loss': 1.5424, 'grad_norm': 0.0036711892993935057, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:02<19:15, 3.69s/it] 40%|████ | 208/520 [13:06<19:12, 3.69s/it] {'loss': 1.4467, 'grad_norm': 0.004090680928288419, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:06<19:12, 3.69s/it] 40%|████ | 209/520 [13:10<19:07, 3.69s/it] {'loss': 1.3855, 'grad_norm': 0.003800149488394726, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:10<19:07, 3.69s/it] 40%|████ | 210/520 [13:14<19:07, 3.70s/it] {'loss': 1.4658, 'grad_norm': 0.004193657396503104, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:14<19:07, 3.70s/it] 41%|████ | 211/520 [13:17<19:01, 3.69s/it] {'loss': 1.4871, 'grad_norm': 0.0036347865631214137, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:17<19:01, 3.69s/it] 41%|████ | 212/520 [13:21<18:55, 3.69s/it] {'loss': 1.4305, 'grad_norm': 0.0036386311044701216, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:21<18:55, 3.69s/it] 41%|████ | 213/520 [13:25<18:54, 3.69s/it] {'loss': 1.4062, 'grad_norm': 0.004339149484716636, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:25<18:54, 3.69s/it] 41%|████ | 214/520 [13:28<18:50, 3.70s/it] {'loss': 1.3948, 'grad_norm': 0.003880529181130728, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:28<18:50, 3.70s/it] 41%|████▏ | 215/520 
[13:32<18:45, 3.69s/it] {'loss': 1.449, 'grad_norm': 0.0038377267658531258, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:32<18:45, 3.69s/it] 42%|████▏ | 216/520 [13:36<18:47, 3.71s/it] {'loss': 1.2994, 'grad_norm': 0.0038337074952759817, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:36<18:47, 3.71s/it] 42%|████▏ | 217/520 [13:40<18:47, 3.72s/it] {'loss': 1.4334, 'grad_norm': 0.0036073621392991827, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:40<18:47, 3.72s/it] 42%|████▏ | 218/520 [13:43<18:56, 3.76s/it] {'loss': 1.4372, 'grad_norm': 0.0039019216613980392, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:43<18:56, 3.76s/it] 42%|████▏ | 219/520 [13:47<18:58, 3.78s/it] {'loss': 1.3857, 'grad_norm': 0.00333944297674297, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:47<18:58, 3.78s/it] 42%|████▏ | 220/520 [13:51<19:00, 3.80s/it] {'loss': 1.5176, 'grad_norm': 0.004052095422632214, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:51<19:00, 3.80s/it] 42%|████▎ | 221/520 [13:55<19:00, 3.81s/it] {'loss': 1.4357, 'grad_norm': 0.004089540427123695, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:55<19:00, 3.81s/it] 43%|████▎ | 222/520 [13:59<18:59, 3.83s/it] {'loss': 1.3339, 'grad_norm': 0.0037566704381061865, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [13:59<18:59, 3.83s/it] 43%|████▎ | 223/520 [14:03<18:57, 3.83s/it] {'loss': 1.3281, 'grad_norm': 0.003446253681165315, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:03<18:57, 3.83s/it] 43%|████▎ | 224/520 [14:06<18:54, 3.83s/it] {'loss': 1.8097, 'grad_norm': 0.005076199194348646, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:06<18:54, 3.83s/it] 43%|████▎ | 225/520 [14:10<18:50, 3.83s/it] {'loss': 1.3465, 'grad_norm': 0.003907281215559007, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:10<18:50, 3.83s/it] 43%|████▎ | 226/520 [14:14<18:48, 3.84s/it] {'loss': 1.4519, 'grad_norm': 0.0037570863047070653, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:14<18:48, 3.84s/it] 44%|████▎ | 227/520 [14:18<18:38, 3.82s/it] {'loss': 1.4382, 'grad_norm': 0.0033845673298389164, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:18<18:38, 3.82s/it] 44%|████▍ | 228/520 [14:22<18:26, 3.79s/it] {'loss': 1.6826, 'grad_norm': 0.004177108140487863, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:22<18:26, 3.79s/it] 44%|████▍ | 229/520 [14:25<18:23, 3.79s/it] {'loss': 1.4342, 'grad_norm': 0.0033411233508272883, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:25<18:23, 3.79s/it] 44%|████▍ | 230/520 [14:29<18:08, 3.75s/it] {'loss': 1.2848, 'grad_norm': 0.003641513398938032, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:29<18:08, 3.75s/it] 44%|████▍ | 231/520 [14:33<17:56, 3.73s/it] {'loss': 1.3457, 'grad_norm': 0.003298592017431885, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:33<17:56, 3.73s/it] 45%|████▍ | 232/520 [14:36<17:45, 3.70s/it] {'loss': 1.7173, 'grad_norm': 0.004219534242474612, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:36<17:45, 3.70s/it] 45%|████▍ | 233/520 [14:40<17:37, 3.68s/it] {'loss': 1.5706, 
'grad_norm': 0.003954844938891517, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:40<17:37, 3.68s/it] 45%|████▌ | 234/520 [14:44<17:32, 3.68s/it] {'loss': 1.2927, 'grad_norm': 0.0037014458906164858, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:44<17:32, 3.68s/it] 45%|████▌ | 235/520 [14:47<17:27, 3.68s/it] {'loss': 1.3453, 'grad_norm': 0.003870421837217634, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:47<17:27, 3.68s/it] 45%|████▌ | 236/520 [14:51<17:23, 3.67s/it] {'loss': 1.4952, 'grad_norm': 0.003496729633443871, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:51<17:23, 3.67s/it] 46%|████▌ | 237/520 [14:55<17:19, 3.67s/it] {'loss': 1.4327, 'grad_norm': 0.003663441954589593, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:55<17:19, 3.67s/it] 46%|████▌ | 238/520 [14:58<17:13, 3.67s/it] {'loss': 1.3734, 'grad_norm': 0.003685289433029505, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [14:58<17:13, 3.67s/it] 46%|████▌ | 239/520 [15:02<17:10, 3.67s/it] {'loss': 1.4938, 'grad_norm': 0.003948956996643289, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:02<17:10, 3.67s/it] 46%|████▌ | 240/520 [15:06<17:06, 3.66s/it] {'loss': 1.2244, 'grad_norm': 0.0031998477981113925, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:06<17:06, 3.66s/it] 46%|████▋ | 241/520 [15:09<17:01, 3.66s/it] {'loss': 1.3146, 'grad_norm': 0.0037060707537697082, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:09<17:01, 3.66s/it] 47%|████▋ | 242/520 [15:13<17:00, 3.67s/it] {'loss': 1.3426, 'grad_norm': 0.0033837985580351788, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:13<17:00, 3.67s/it] 47%|████▋ | 243/520 [15:17<16:56, 3.67s/it] {'loss': 1.345, 'grad_norm': 0.003563586386353286, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:17<16:56, 3.67s/it] 47%|████▋ | 244/520 [15:21<17:08, 3.73s/it] {'loss': 1.4941, 'grad_norm': 0.004017796650237056, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:21<17:08, 3.73s/it] 47%|████▋ | 245/520 [15:24<17:14, 3.76s/it] {'loss': 1.3296, 'grad_norm': 0.003683627281392317, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:24<17:14, 3.76s/it] 47%|████▋ | 246/520 [15:28<17:15, 3.78s/it] {'loss': 1.6767, 'grad_norm': 0.004020954830571835, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:28<17:15, 3.78s/it] 48%|████▊ | 247/520 [15:32<17:13, 3.79s/it] {'loss': 1.5078, 'grad_norm': 0.0036018218436966298, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:32<17:13, 3.79s/it] 48%|████▊ | 248/520 [15:36<17:14, 3.80s/it] {'loss': 1.3205, 'grad_norm': 0.0035342613446437882, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:36<17:14, 3.80s/it] 48%|████▊ | 249/520 [15:40<17:12, 3.81s/it] {'loss': 1.4553, 'grad_norm': 0.0037671167952380374, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:40<17:12, 3.81s/it] 48%|████▊ | 250/520 [15:44<17:14, 3.83s/it] {'loss': 1.3905, 'grad_norm': 0.004038343718618196, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:44<17:14, 3.83s/it] 48%|████▊ | 251/520 [15:47<17:00, 3.80s/it] {'loss': 1.4389, 'grad_norm': 0.003309737686146884, 
'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:47<17:00, 3.80s/it] 48%|████▊ | 252/520 [15:51<16:46, 3.76s/it] {'loss': 1.5386, 'grad_norm': 0.0035064106512691217, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:51<16:46, 3.76s/it] 49%|████▊ | 253/520 [15:55<16:36, 3.73s/it] {'loss': 1.4421, 'grad_norm': 0.004019820701350469, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:55<16:36, 3.73s/it] 49%|████▉ | 254/520 [15:58<16:28, 3.72s/it] {'loss': 1.3399, 'grad_norm': 0.0032056563380070307, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [15:58<16:28, 3.72s/it] 49%|████▉ | 255/520 [16:02<16:20, 3.70s/it] {'loss': 1.3698, 'grad_norm': 0.003649577646671097, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:02<16:20, 3.70s/it] 49%|████▉ | 256/520 [16:06<16:11, 3.68s/it] {'loss': 1.3997, 'grad_norm': 0.003822183946314945, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:06<16:11, 3.68s/it] 49%|████▉ | 257/520 [16:09<16:16, 3.71s/it] {'loss': 1.4138, 'grad_norm': 0.0035851111641414584, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:09<16:16, 3.71s/it] 50%|████▉ | 258/520 [16:13<16:26, 3.77s/it] {'loss': 1.4265, 'grad_norm': 0.003152370658215548, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:13<16:26, 3.77s/it] 50%|████▉ | 259/520 [16:17<16:30, 3.80s/it] {'loss': 1.4815, 'grad_norm': 0.003904524963173172, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:17<16:30, 3.80s/it] 50%|█████ | 260/520 [16:21<16:31, 3.81s/it] {'loss': 1.6472, 'grad_norm': 0.003560052440410982, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:21<16:31, 3.81s/it] 50%|█████ | 261/520 [16:25<16:33, 3.84s/it] {'loss': 1.5536, 'grad_norm': 0.003944014289405612, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:25<16:33, 3.84s/it] 50%|█████ | 262/520 [16:29<16:32, 3.85s/it] {'loss': 1.3189, 'grad_norm': 0.0035617786017012984, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:29<16:32, 3.85s/it] 51%|█████ | 263/520 [16:33<16:29, 3.85s/it] {'loss': 1.5698, 'grad_norm': 0.003880246743599312, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:33<16:29, 3.85s/it] 51%|█████ | 264/520 [16:36<16:29, 3.86s/it] {'loss': 1.4488, 'grad_norm': 0.003434045027415627, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:36<16:29, 3.86s/it] 51%|█████ | 265/520 [16:40<16:25, 3.86s/it] {'loss': 1.3264, 'grad_norm': 0.004141161410917559, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:40<16:25, 3.86s/it] 51%|█████ | 266/520 [16:44<16:23, 3.87s/it] {'loss': 1.1649, 'grad_norm': 0.003004039736890688, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:44<16:23, 3.87s/it] 51%|█████▏ | 267/520 [16:48<16:22, 3.88s/it] {'loss': 1.3324, 'grad_norm': 0.0034273939619217137, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:48<16:22, 3.88s/it] 52%|█████▏ | 268/520 [16:52<16:21, 3.89s/it] {'loss': 1.6944, 'grad_norm': 0.004304751426775206, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:52<16:21, 3.89s/it] 52%|█████▏ | 269/520 [16:56<16:16, 3.89s/it] {'loss': 1.4444, 'grad_norm': 0.003692908165325492, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 
52%|█████▏ | 269/520 [16:56<16:16, 3.89s/it] 52%|█████▏ | 270/520 [17:00<16:12, 3.89s/it] {'loss': 1.464, 'grad_norm': 0.0035165952944263123, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:00<16:12, 3.89s/it] 52%|█████▏ | 271/520 [17:04<16:09, 3.89s/it] {'loss': 1.4481, 'grad_norm': 0.0036784330042830536, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:04<16:09, 3.89s/it] 52%|█████▏ | 272/520 [17:08<16:05, 3.89s/it] {'loss': 1.4662, 'grad_norm': 0.003850456693774647, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:08<16:05, 3.89s/it] 52%|█████▎ | 273/520 [17:12<15:59, 3.88s/it] {'loss': 1.6164, 'grad_norm': 0.004397016514666861, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:12<15:59, 3.88s/it] 53%|█████▎ | 274/520 [17:15<15:55, 3.89s/it] {'loss': 1.3777, 'grad_norm': 0.003739114526853269, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:15<15:55, 3.89s/it] 53%|█████▎ | 275/520 [17:19<15:50, 3.88s/it] {'loss': 1.3223, 'grad_norm': 0.003914895558463294, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:19<15:50, 3.88s/it] 53%|█████▎ | 276/520 [17:23<15:45, 3.87s/it] {'loss': 1.433, 'grad_norm': 0.003785134438214683, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:23<15:45, 3.87s/it] 53%|█████▎ | 277/520 [17:27<15:42, 3.88s/it] {'loss': 1.5884, 'grad_norm': 0.003612085588034402, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:27<15:42, 3.88s/it] 53%|█████▎ | 278/520 [17:31<15:38, 3.88s/it] {'loss': 1.2673, 'grad_norm': 0.00323462176221081, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:31<15:38, 3.88s/it] 54%|█████▎ | 279/520 [17:35<15:34, 3.88s/it] {'loss': 1.4998, 'grad_norm': 0.004197617523136716, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:35<15:34, 3.88s/it] 54%|█████▍ | 280/520 [17:39<15:30, 3.88s/it] {'loss': 1.3383, 'grad_norm': 0.003990745262778152, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:39<15:30, 3.88s/it] 54%|█████▍ | 281/520 [17:43<15:25, 3.87s/it] {'loss': 1.4523, 'grad_norm': 0.003614675889678737, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:43<15:25, 3.87s/it] 54%|█████▍ | 282/520 [17:46<15:21, 3.87s/it] {'loss': 1.2955, 'grad_norm': 0.003250656110627436, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:46<15:21, 3.87s/it] 54%|█████▍ | 283/520 [17:50<15:19, 3.88s/it] {'loss': 1.4931, 'grad_norm': 0.003919164067947631, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:50<15:19, 3.88s/it] 55%|█████▍ | 284/520 [17:54<15:16, 3.88s/it] {'loss': 1.4611, 'grad_norm': 0.004315024792901797, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:54<15:16, 3.88s/it] 55%|█████▍ | 285/520 [17:58<15:11, 3.88s/it] {'loss': 1.3295, 'grad_norm': 0.0038451096035102264, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:58<15:11, 3.88s/it] 55%|█████▌ | 286/520 [18:02<15:06, 3.87s/it] {'loss': 1.1876, 'grad_norm': 0.0037141775778277352, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:02<15:06, 3.87s/it] 55%|█████▌ | 287/520 [18:06<15:03, 3.88s/it] {'loss': 1.4456, 'grad_norm': 0.004163171501103265, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ 
| 287/520 [18:06<15:03, 3.88s/it] 55%|█████▌ | 288/520 [18:10<14:59, 3.88s/it] {'loss': 1.4983, 'grad_norm': 0.00333517550701863, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:10<14:59, 3.88s/it] 56%|█████▌ | 289/520 [18:14<14:56, 3.88s/it] {'loss': 1.3317, 'grad_norm': 0.0032564740695668415, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:14<14:56, 3.88s/it] 56%|█████▌ | 290/520 [18:17<14:51, 3.88s/it] {'loss': 1.2454, 'grad_norm': 0.0032119036672973115, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:17<14:51, 3.88s/it] 56%|█████▌ | 291/520 [18:21<14:47, 3.87s/it] {'loss': 1.3022, 'grad_norm': 0.003747769059427967, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:21<14:47, 3.87s/it] 56%|█████▌ | 292/520 [18:25<14:43, 3.88s/it] {'loss': 1.3543, 'grad_norm': 0.0033070593147457943, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:25<14:43, 3.88s/it] 56%|█████▋ | 293/520 [18:29<14:38, 3.87s/it] {'loss': 1.2889, 'grad_norm': 0.003478120226246849, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:29<14:38, 3.87s/it] 57%|█████▋ | 294/520 [18:33<14:37, 3.88s/it] {'loss': 1.3303, 'grad_norm': 0.003604876641436474, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:33<14:37, 3.88s/it] 57%|█████▋ | 295/520 [18:37<14:37, 3.90s/it] {'loss': 1.5859, 'grad_norm': 0.010417942878189482, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:37<14:37, 3.90s/it] 57%|█████▋ | 296/520 [18:41<14:29, 3.88s/it] {'loss': 1.2633, 'grad_norm': 0.003577396615712505, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:41<14:29, 3.88s/it] 57%|█████▋ | 297/520 [18:45<14:23, 3.87s/it] {'loss': 1.409, 'grad_norm': 0.0038534745065162794, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:45<14:23, 3.87s/it] 57%|█████▋ | 298/520 [18:48<14:18, 3.87s/it] {'loss': 1.3761, 'grad_norm': 0.0032526045041070344, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:48<14:18, 3.87s/it] 57%|█████▊ | 299/520 [18:52<14:15, 3.87s/it] {'loss': 1.5381, 'grad_norm': 0.0034132558729588116, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:52<14:15, 3.87s/it] 58%|█████▊ | 300/520 [18:56<14:11, 3.87s/it] {'loss': 1.4397, 'grad_norm': 0.003485649022305374, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:56<14:11, 3.87s/it] 58%|█████▊ | 301/520 [19:00<13:58, 3.83s/it] {'loss': 1.402, 'grad_norm': 0.0036547438280739026, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:00<13:58, 3.83s/it] 58%|█████▊ | 302/520 [19:04<13:44, 3.78s/it] {'loss': 1.5628, 'grad_norm': 0.003606858901394106, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:04<13:44, 3.78s/it] 58%|█████▊ | 303/520 [19:07<13:32, 3.74s/it] {'loss': 1.3307, 'grad_norm': 0.0036758515855755142, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:07<13:32, 3.74s/it] 58%|█████▊ | 304/520 [19:11<13:25, 3.73s/it] {'loss': 1.4896, 'grad_norm': 0.0047272806251565415, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:11<13:25, 3.73s/it] 59%|█████▊ | 305/520 [19:15<13:17, 3.71s/it] {'loss': 1.4769, 'grad_norm': 0.004355388962159931, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 
305/520 [19:15<13:17, 3.71s/it] 59%|█████▉ | 306/520 [19:18<13:12, 3.70s/it] {'loss': 1.3856, 'grad_norm': 0.0034637063542927926, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:18<13:12, 3.70s/it] 59%|█████▉ | 307/520 [19:22<13:10, 3.71s/it] {'loss': 1.3241, 'grad_norm': 0.0032389024579241566, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:22<13:10, 3.71s/it] 59%|█████▉ | 308/520 [19:26<13:17, 3.76s/it] {'loss': 1.4491, 'grad_norm': 0.003462654364321912, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:26<13:17, 3.76s/it] 59%|█████▉ | 309/520 [19:30<13:42, 3.90s/it] {'loss': 1.3133, 'grad_norm': 0.003251399519291262, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:30<13:42, 3.90s/it] 60%|█████▉ | 310/520 [19:34<13:37, 3.89s/it] {'loss': 1.2892, 'grad_norm': 0.003292791351207003, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:34<13:37, 3.89s/it] 60%|█████▉ | 311/520 [19:38<13:32, 3.89s/it] {'loss': 1.26, 'grad_norm': 0.003300055571743156, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:38<13:32, 3.89s/it] 60%|██████ | 312/520 [19:42<13:27, 3.88s/it] {'loss': 1.2481, 'grad_norm': 0.0035062656172888808, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:42<13:27, 3.88s/it] 60%|██████ | 313/520 [19:46<13:24, 3.89s/it] {'loss': 1.2353, 'grad_norm': 0.0031566055988123394, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:46<13:24, 3.89s/it] 60%|██████ | 314/520 [19:50<13:43, 4.00s/it] {'loss': 1.2704, 'grad_norm': 0.0031518800306106574, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:50<13:43, 4.00s/it] 61%|██████ | 315/520 [19:54<13:34, 3.97s/it] {'loss': 1.6416, 'grad_norm': 0.009580107547377661, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:54<13:34, 3.97s/it] 61%|██████ | 316/520 [19:58<13:50, 4.07s/it] {'loss': 1.2441, 'grad_norm': 0.004182231412554762, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:58<13:50, 4.07s/it] 61%|██████ | 317/520 [20:02<13:34, 4.01s/it] {'loss': 1.2729, 'grad_norm': 0.003206781522145497, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [20:02<13:34, 4.01s/it] 61%|██████ | 318/520 [20:06<13:23, 3.98s/it] {'loss': 1.4178, 'grad_norm': 0.003717266415897059, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:06<13:23, 3.98s/it] 61%|██████▏ | 319/520 [20:10<13:32, 4.04s/it] {'loss': 1.2613, 'grad_norm': 0.003421767542855142, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:10<13:32, 4.04s/it] 62%|██████▏ | 320/520 [20:14<13:18, 3.99s/it] {'loss': 1.1979, 'grad_norm': 0.003834071375441451, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:14<13:18, 3.99s/it] 62%|██████▏ | 321/520 [20:18<13:06, 3.95s/it] {'loss': 1.4166, 'grad_norm': 0.0034578119392568705, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:18<13:06, 3.95s/it] 62%|██████▏ | 322/520 [20:22<12:56, 3.92s/it] {'loss': 1.3796, 'grad_norm': 0.004063496235160033, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:22<12:56, 3.92s/it] 62%|██████▏ | 323/520 [20:26<12:49, 3.90s/it] {'loss': 1.4691, 'grad_norm': 0.004003580956828034, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 
323/520 [20:26<12:49, 3.90s/it] 62%|██████▏ | 324/520 [20:29<12:41, 3.88s/it] {'loss': 1.3368, 'grad_norm': 0.0038837007166938977, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:29<12:41, 3.88s/it] 62%|██████▎ | 325/520 [20:33<12:34, 3.87s/it] {'loss': 1.3624, 'grad_norm': 0.003839788314847766, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:33<12:34, 3.87s/it] 63%|██████▎ | 326/520 [20:37<12:33, 3.88s/it] {'loss': 1.3246, 'grad_norm': 0.003470454022491942, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:37<12:33, 3.88s/it] 63%|██████▎ | 327/520 [20:41<12:40, 3.94s/it] {'loss': 1.5607, 'grad_norm': 0.004265028461188625, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:41<12:40, 3.94s/it] 63%|██████▎ | 328/520 [20:45<12:48, 4.00s/it] {'loss': 1.4224, 'grad_norm': 0.0037579092498839085, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:45<12:48, 4.00s/it] 63%|██████▎ | 329/520 [20:49<12:38, 3.97s/it] {'loss': 1.2452, 'grad_norm': 0.0029981049273366325, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:49<12:38, 3.97s/it] 63%|██████▎ | 330/520 [20:53<12:15, 3.87s/it] {'loss': 1.3228, 'grad_norm': 0.0031558256241758925, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:53<12:15, 3.87s/it] 64%|██████▎ | 331/520 [20:57<12:00, 3.81s/it] {'loss': 1.3027, 'grad_norm': 0.0033811433332734575, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:57<12:00, 3.81s/it] 64%|██████▍ | 332/520 [21:00<11:47, 3.76s/it] {'loss': 1.5455, 'grad_norm': 0.0033288490334098184, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:00<11:47, 3.76s/it] 64%|██████▍ | 333/520 [21:04<11:36, 3.73s/it] {'loss': 1.4771, 'grad_norm': 0.003773366269942649, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:04<11:36, 3.73s/it] 64%|██████▍ | 334/520 [21:07<11:29, 3.71s/it] {'loss': 1.3474, 'grad_norm': 0.003961995457261636, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:07<11:29, 3.71s/it] 64%|██████▍ | 335/520 [21:11<11:21, 3.68s/it] {'loss': 1.3335, 'grad_norm': 0.003105632422191135, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:11<11:21, 3.68s/it] 65%|██████▍ | 336/520 [21:15<11:17, 3.68s/it] {'loss': 1.2169, 'grad_norm': 0.0038509190252108685, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:15<11:17, 3.68s/it] 65%|██████▍ | 337/520 [21:18<11:13, 3.68s/it] {'loss': 1.2198, 'grad_norm': 0.003395930878906603, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:18<11:13, 3.68s/it] 65%|██████▌ | 338/520 [21:22<11:09, 3.68s/it] {'loss': 1.3654, 'grad_norm': 0.0033760729359701242, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:22<11:09, 3.68s/it] 65%|██████▌ | 339/520 [21:26<11:07, 3.69s/it] {'loss': 1.2909, 'grad_norm': 0.003405374890046798, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:26<11:07, 3.69s/it] 65%|██████▌ | 340/520 [21:30<11:03, 3.69s/it] {'loss': 1.2758, 'grad_norm': 0.003538777173631101, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:30<11:03, 3.69s/it] 66%|██████▌ | 341/520 [21:33<10:59, 3.69s/it] {'loss': 1.3028, 'grad_norm': 0.003510045490495046, 'learning_rate': 
0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:33<10:59, 3.69s/it] 66%|██████▌ | 342/520 [21:37<10:54, 3.67s/it] {'loss': 1.5278, 'grad_norm': 0.004611451756249727, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:37<10:54, 3.67s/it] 66%|██████▌ | 343/520 [21:41<10:52, 3.68s/it] {'loss': 1.5009, 'grad_norm': 0.004528391680747456, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:41<10:52, 3.68s/it] 66%|██████▌ | 344/520 [21:44<10:48, 3.68s/it] {'loss': 1.2496, 'grad_norm': 0.0033013682783244727, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:44<10:48, 3.68s/it] 66%|██████▋ | 345/520 [21:48<10:44, 3.68s/it] {'loss': 1.3706, 'grad_norm': 0.0036134876044635333, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:48<10:44, 3.68s/it] 67%|██████▋ | 346/520 [21:52<10:39, 3.68s/it] {'loss': 1.4421, 'grad_norm': 0.003527031229740638, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:52<10:39, 3.68s/it] 67%|██████▋ | 347/520 [21:55<10:34, 3.67s/it] {'loss': 1.2602, 'grad_norm': 0.003181950560509681, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:55<10:34, 3.67s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:59<10:31, 3.67s/it] {'loss': 1.2404, 'grad_norm': 0.004044688933236615, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:59<10:31, 3.67s/it] 67%|██████▋ | 349/520 [22:03<10:24, 3.65s/it] {'loss': 1.2932, 'grad_norm': 0.003863521524669386, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:03<10:24, 3.65s/it] 67%|██████▋ | 350/520 [22:06<10:23, 3.66s/it] {'loss': 1.3083, 'grad_norm': 0.003623271899266778, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:06<10:23, 3.66s/it] 68%|██████▊ | 351/520 [22:10<10:19, 3.67s/it] {'loss': 1.212, 'grad_norm': 0.003145366850223705, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:10<10:19, 3.67s/it] 68%|██████▊ | 352/520 [22:14<10:15, 3.66s/it] {'loss': 1.3421, 'grad_norm': 0.0032530938705302038, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:14<10:15, 3.66s/it] 68%|██████▊ | 353/520 [22:17<10:15, 3.69s/it] {'loss': 1.4072, 'grad_norm': 0.0030089560216790367, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:17<10:15, 3.69s/it] 68%|██████▊ | 354/520 [22:21<10:08, 3.67s/it] {'loss': 1.5746, 'grad_norm': 0.0035163006935923645, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:21<10:08, 3.67s/it] 68%|██████▊ | 355/520 [22:25<10:03, 3.66s/it] {'loss': 1.2804, 'grad_norm': 0.0033128179314500274, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:25<10:03, 3.66s/it] 68%|██████▊ | 356/520 [22:28<09:58, 3.65s/it] {'loss': 1.2727, 'grad_norm': 0.0034410104182812057, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:28<09:58, 3.65s/it] 69%|██████▊ | 357/520 [22:32<09:54, 3.64s/it] {'loss': 1.2855, 'grad_norm': 0.0030206500863767453, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:32<09:54, 3.64s/it] 69%|██████▉ | 358/520 [22:35<09:51, 3.65s/it] {'loss': 1.2308, 'grad_norm': 
0.003564845130564207, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:35<09:51, 3.65s/it] 69%|██████▉ | 359/520 [22:39<09:47, 3.65s/it] {'loss': 1.4743, 'grad_norm': 0.003777528065909061, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:39<09:47, 3.65s/it] 69%|██████▉ | 360/520 [22:43<09:43, 3.65s/it] {'loss': 1.5067, 'grad_norm': 0.004294269173503515, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:43<09:43, 3.65s/it] 69%|██████▉ | 361/520 [22:46<09:40, 3.65s/it] {'loss': 1.4748, 'grad_norm': 0.003246606611857634, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:46<09:40, 3.65s/it] 70%|██████▉ | 362/520 [22:50<09:35, 3.65s/it] {'loss': 1.2847, 'grad_norm': 0.003550027274547357, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:50<09:35, 3.65s/it] 70%|██████▉ | 363/520 [22:54<09:34, 3.66s/it] {'loss': 1.3258, 'grad_norm': 0.003206277688978881, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:54<09:34, 3.66s/it] 70%|███████ | 364/520 [22:57<09:31, 3.66s/it] {'loss': 1.4948, 'grad_norm': 0.0033162038098907523, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:57<09:31, 3.66s/it] 70%|███████ | 365/520 [23:01<09:26, 3.66s/it] {'loss': 1.392, 'grad_norm': 0.003444185142288142, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [23:01<09:26, 3.66s/it] 70%|███████ | 366/520 [23:05<09:26, 3.68s/it] {'loss': 1.3312, 'grad_norm': 0.003078025977712245, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:05<09:26, 3.68s/it] 71%|███████ | 367/520 [23:08<09:22, 3.68s/it] {'loss': 1.3443, 'grad_norm': 0.00334908105267993, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:08<09:22, 3.68s/it] 71%|███████ | 368/520 [23:12<09:18, 3.67s/it] {'loss': 1.1909, 'grad_norm': 0.0035669668548539766, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:12<09:18, 3.67s/it] 71%|███████ | 369/520 [23:16<09:15, 3.68s/it] {'loss': 1.4492, 'grad_norm': 0.003201006778162486, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:16<09:15, 3.68s/it] 71%|███████ | 370/520 [23:20<09:12, 3.68s/it] {'loss': 1.2467, 'grad_norm': 0.0030468616242047366, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:20<09:12, 3.68s/it] 71%|███████▏ | 371/520 [23:23<09:09, 3.69s/it] {'loss': 1.2386, 'grad_norm': 0.0036065644347985193, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:23<09:09, 3.69s/it] 72%|███████▏ | 372/520 [23:27<09:05, 3.69s/it] {'loss': 1.5592, 'grad_norm': 0.003268453701565564, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:27<09:05, 3.69s/it] 72%|███████▏ | 373/520 [23:31<09:07, 3.73s/it] {'loss': 1.4296, 'grad_norm': 0.0038898174303162226, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:31<09:07, 3.73s/it] 72%|███████▏ | 374/520 [23:35<09:11, 3.78s/it] {'loss': 1.3326, 'grad_norm': 0.003290054764369092, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:35<09:11, 3.78s/it] 72%|███████▏ | 375/520 [23:38<09:12, 3.81s/it] {'loss': 1.2318, 'grad_norm': 0.0033374933409626916, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:38<09:12, 3.81s/it] 72%|███████▏ | 376/520 
[23:42<09:04, 3.78s/it] {'loss': 1.35, 'grad_norm': 0.0030375500360259895, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:42<09:04, 3.78s/it] 72%|███████▎ | 377/520 [23:46<08:56, 3.75s/it] {'loss': 1.3032, 'grad_norm': 0.0033961360096472245, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:46<08:56, 3.75s/it] 73%|███████▎ | 378/520 [23:50<08:57, 3.79s/it] {'loss': 1.3514, 'grad_norm': 0.003218098548223944, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:50<08:57, 3.79s/it] 73%|███████▎ | 379/520 [23:54<09:05, 3.87s/it] {'loss': 1.3412, 'grad_norm': 0.003286637410364228, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:54<09:05, 3.87s/it] 73%|███████▎ | 380/520 [23:58<08:54, 3.82s/it] {'loss': 1.5574, 'grad_norm': 0.004945342638629208, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:58<08:54, 3.82s/it] 73%|███████▎ | 381/520 [24:01<08:43, 3.77s/it] {'loss': 1.3345, 'grad_norm': 0.0032769850454020736, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:01<08:43, 3.77s/it] 73%|███████▎ | 382/520 [24:05<08:35, 3.74s/it] {'loss': 1.4625, 'grad_norm': 0.0034887064018515644, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:05<08:35, 3.74s/it] 74%|███████▎ | 383/520 [24:08<08:26, 3.70s/it] {'loss': 1.1675, 'grad_norm': 0.003574475325258834, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:08<08:26, 3.70s/it] 74%|███████▍ | 384/520 [24:12<08:19, 3.68s/it] {'loss': 1.6469, 'grad_norm': 0.003566715192068791, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:12<08:19, 3.68s/it] 74%|███████▍ | 385/520 [24:16<08:15, 3.67s/it] {'loss': 1.3099, 'grad_norm': 0.0032079623940096383, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:16<08:15, 3.67s/it] 74%|███████▍ | 386/520 [24:19<08:10, 3.66s/it] {'loss': 1.2473, 'grad_norm': 0.003012940248327704, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:19<08:10, 3.66s/it] 74%|███████▍ | 387/520 [24:23<08:05, 3.65s/it] {'loss': 1.5627, 'grad_norm': 0.0033823380766811895, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:23<08:05, 3.65s/it] 75%|███████▍ | 388/520 [24:27<08:01, 3.65s/it] {'loss': 1.1991, 'grad_norm': 0.002957356910464583, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:27<08:01, 3.65s/it] 75%|███████▍ | 389/520 [24:30<07:57, 3.65s/it] {'loss': 1.2682, 'grad_norm': 0.003815810487205095, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:30<07:57, 3.65s/it] 75%|███████▌ | 390/520 [24:34<07:55, 3.66s/it] {'loss': 1.3296, 'grad_norm': 0.0030402213330150704, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:34<07:55, 3.66s/it] 75%|███████▌ | 391/520 [24:38<07:52, 3.66s/it] {'loss': 1.4289, 'grad_norm': 0.0032990598638898186, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:38<07:52, 3.66s/it] 75%|███████▌ | 392/520 [24:41<07:49, 3.66s/it] {'loss': 1.2199, 'grad_norm': 0.003289786131266136, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:41<07:49, 3.66s/it] 76%|███████▌ | 393/520 [24:45<07:45, 3.66s/it] {'loss': 1.3256, 'grad_norm': 0.003197405935729962, 'learning_rate': 0.029731454924533086, 
'epoch': 0.76} + 76%|███████▌ | 393/520 [24:45<07:45, 3.66s/it] 76%|███████▌ | 394/520 [24:49<07:40, 3.65s/it] {'loss': 1.2847, 'grad_norm': 0.0035230263403851717, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:49<07:40, 3.65s/it] 76%|███████▌ | 395/520 [24:52<07:35, 3.64s/it] {'loss': 1.2414, 'grad_norm': 0.0035002077901288625, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:52<07:35, 3.64s/it] 76%|███████▌ | 396/520 [24:56<07:30, 3.64s/it] {'loss': 1.3316, 'grad_norm': 0.0035082050036854356, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:56<07:30, 3.64s/it] 76%|███████▋ | 397/520 [24:59<07:26, 3.63s/it] {'loss': 1.3264, 'grad_norm': 0.003168598940161438, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:59<07:26, 3.63s/it] 77%|███████▋ | 398/520 [25:03<07:22, 3.62s/it] {'loss': 1.3051, 'grad_norm': 0.003256590199952748, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:03<07:22, 3.62s/it] 77%|███████▋ | 399/520 [25:07<07:19, 3.63s/it] {'loss': 1.3896, 'grad_norm': 0.003288597613357632, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:07<07:19, 3.63s/it] 77%|███████▋ | 400/520 [25:10<07:17, 3.64s/it] {'loss': 1.468, 'grad_norm': 0.004008088777677634, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:10<07:17, 3.64s/it] 77%|███████▋ | 401/520 [25:14<07:14, 3.65s/it] {'loss': 1.1042, 'grad_norm': 0.00328965625139118, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:14<07:14, 3.65s/it] 77%|███████▋ | 402/520 [25:18<07:11, 3.66s/it] {'loss': 1.2405, 'grad_norm': 0.0034379641555900443, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:18<07:11, 3.66s/it] 78%|███████▊ | 403/520 [25:21<07:07, 3.65s/it] {'loss': 1.2879, 'grad_norm': 0.0037092564986264858, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:21<07:07, 3.65s/it] 78%|███████▊ | 404/520 [25:25<07:03, 3.65s/it] {'loss': 1.1798, 'grad_norm': 0.004068373848088695, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:25<07:03, 3.65s/it] 78%|███████▊ | 405/520 [25:29<06:59, 3.65s/it] {'loss': 1.3991, 'grad_norm': 0.003496655523924469, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:29<06:59, 3.65s/it] 78%|███████▊ | 406/520 [25:32<06:55, 3.64s/it] {'loss': 1.3219, 'grad_norm': 0.0040308027223930265, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:32<06:55, 3.64s/it] 78%|███████▊ | 407/520 [25:36<06:52, 3.65s/it] {'loss': 1.3885, 'grad_norm': 0.0035056140002798423, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:36<06:52, 3.65s/it] 78%|███████▊ | 408/520 [25:40<06:48, 3.65s/it] {'loss': 1.2661, 'grad_norm': 0.0035263613582921472, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:40<06:48, 3.65s/it] 79%|███████▊ | 409/520 [25:43<06:44, 3.65s/it] {'loss': 1.3993, 'grad_norm': 0.0037620198557940772, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:43<06:44, 3.65s/it] 79%|███████▉ | 410/520 [25:47<06:40, 3.64s/it] {'loss': 1.1096, 'grad_norm': 0.00324095674652144, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:47<06:40, 3.64s/it] 79%|███████▉ | 411/520 [25:50<06:36, 3.64s/it] {'loss': 
1.3668, 'grad_norm': 0.004054959690515695, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:50<06:36, 3.64s/it] 79%|███████▉ | 412/520 [25:54<06:34, 3.65s/it] {'loss': 1.2825, 'grad_norm': 0.0033454696496482313, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:54<06:34, 3.65s/it] 79%|███████▉ | 413/520 [25:58<06:31, 3.66s/it] {'loss': 1.4465, 'grad_norm': 0.004232167452709872, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [25:58<06:31, 3.66s/it] 80%|███████▉ | 414/520 [26:02<06:28, 3.67s/it] {'loss': 1.1985, 'grad_norm': 0.0029628470203978702, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:02<06:28, 3.67s/it] 80%|███████▉ | 415/520 [26:05<06:27, 3.69s/it] {'loss': 1.2575, 'grad_norm': 0.0032863631703056242, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:05<06:27, 3.69s/it] 80%|████████ | 416/520 [26:09<06:25, 3.71s/it] {'loss': 1.1687, 'grad_norm': 0.003721646463861395, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:09<06:25, 3.71s/it] 80%|████████ | 417/520 [26:13<06:20, 3.69s/it] {'loss': 1.3513, 'grad_norm': 0.0039647967821483145, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:13<06:20, 3.69s/it] 80%|████████ | 418/520 [26:16<06:15, 3.68s/it] {'loss': 1.3299, 'grad_norm': 0.0031590661194319976, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:16<06:15, 3.68s/it] 81%|████████ | 419/520 [26:20<06:11, 3.67s/it] {'loss': 1.3083, 'grad_norm': 0.003394866678889375, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:20<06:11, 3.67s/it] 81%|████████ | 420/520 [26:24<06:06, 3.67s/it] {'loss': 1.1873, 'grad_norm': 0.0034148236047199174, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:24<06:06, 3.67s/it] 81%|████████ | 421/520 [26:27<06:02, 3.66s/it] {'loss': 1.1113, 'grad_norm': 0.0037653210040106662, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:27<06:02, 3.66s/it] 81%|████████ | 422/520 [26:31<05:58, 3.65s/it] {'loss': 1.2551, 'grad_norm': 0.0036176651594709073, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:31<05:58, 3.65s/it] 81%|████████▏ | 423/520 [26:35<05:54, 3.65s/it] {'loss': 1.2519, 'grad_norm': 0.0037890049806081788, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:35<05:54, 3.65s/it] 82%|████████▏ | 424/520 [26:38<05:51, 3.66s/it] {'loss': 1.5387, 'grad_norm': 0.0037772154774653455, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:38<05:51, 3.66s/it] 82%|████████▏ | 425/520 [26:42<05:46, 3.65s/it] {'loss': 1.2527, 'grad_norm': 0.003324818382144771, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:42<05:46, 3.65s/it] 82%|████████▏ | 426/520 [26:46<05:44, 3.66s/it] {'loss': 1.2832, 'grad_norm': 0.004557366675565265, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:46<05:44, 3.66s/it] 82%|████████▏ | 427/520 [26:49<05:39, 3.65s/it] {'loss': 1.1797, 'grad_norm': 0.0032139043116529644, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:49<05:39, 3.65s/it] 82%|████████▏ | 428/520 [26:53<05:34, 3.64s/it] {'loss': 1.1585, 'grad_norm': 0.0033932370742675508, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 
82%|████████▏ | 428/520 [26:53<05:34, 3.64s/it] 82%|████████▎ | 429/520 [26:57<05:33, 3.66s/it] {'loss': 1.2673, 'grad_norm': 0.0032706240322772397, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:57<05:33, 3.66s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:00<05:28, 3.65s/it] {'loss': 1.2548, 'grad_norm': 0.0030554739156995778, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:00<05:28, 3.65s/it] 83%|████████▎ | 431/520 [27:04<05:24, 3.65s/it] {'loss': 1.4186, 'grad_norm': 0.003513811995742794, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:04<05:24, 3.65s/it] 83%|████████▎ | 432/520 [27:07<05:21, 3.65s/it] {'loss': 1.1661, 'grad_norm': 0.0036698947324264627, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:07<05:21, 3.65s/it] 83%|████████▎ | 433/520 [27:11<05:18, 3.66s/it] {'loss': 1.3148, 'grad_norm': 0.003334612845621327, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:11<05:18, 3.66s/it] 83%|████████▎ | 434/520 [27:15<05:14, 3.66s/it] {'loss': 1.0286, 'grad_norm': 0.003205472053231788, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:15<05:14, 3.66s/it] 84%|████████▎ | 435/520 [27:18<05:10, 3.65s/it] {'loss': 1.3505, 'grad_norm': 0.0036919976449321756, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:18<05:10, 3.65s/it] 84%|████████▍ | 436/520 [27:22<05:06, 3.65s/it] {'loss': 1.1188, 'grad_norm': 0.00319520371060093, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:22<05:06, 3.65s/it] 84%|████████▍ | 437/520 [27:26<05:04, 3.66s/it] {'loss': 1.3731, 'grad_norm': 0.0033364178679496192, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:26<05:04, 3.66s/it] 84%|████████▍ | 438/520 [27:29<05:00, 3.67s/it] {'loss': 1.1567, 'grad_norm': 0.003124679036042631, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:29<05:00, 3.67s/it] 84%|████████▍ | 439/520 [27:33<04:56, 3.66s/it] {'loss': 1.345, 'grad_norm': 0.0028002382387028136, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:33<04:56, 3.66s/it] 85%|████████▍ | 440/520 [27:37<04:52, 3.66s/it] {'loss': 1.2345, 'grad_norm': 0.0032759474049057448, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:37<04:52, 3.66s/it] 85%|████████▍ | 441/520 [27:40<04:49, 3.66s/it] {'loss': 1.3969, 'grad_norm': 0.003502407237635897, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:40<04:49, 3.66s/it] 85%|████████▌ | 442/520 [27:44<04:45, 3.66s/it] {'loss': 1.2751, 'grad_norm': 0.003956221158169471, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:44<04:45, 3.66s/it] 85%|████████▌ | 443/520 [27:48<04:42, 3.67s/it] {'loss': 1.2987, 'grad_norm': 0.0033202213165084375, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:48<04:42, 3.67s/it] 85%|████████▌ | 444/520 [27:51<04:39, 3.68s/it] {'loss': 1.2645, 'grad_norm': 0.0028650290675659527, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:51<04:39, 3.68s/it] 86%|████████▌ | 445/520 [27:55<04:35, 3.67s/it] 
{'loss': 1.1783, 'grad_norm': 0.003270377517858011, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:55<04:35, 3.67s/it] 86%|████████▌ | 446/520 [27:59<04:31, 3.67s/it] {'loss': 1.4798, 'grad_norm': 0.0032696872046078036, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [27:59<04:31, 3.67s/it] 86%|████████▌ | 447/520 [28:02<04:27, 3.66s/it] {'loss': 1.287, 'grad_norm': 0.0033374482100107217, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:02<04:27, 3.66s/it] 86%|████████▌ | 448/520 [28:06<04:23, 3.66s/it] {'loss': 1.2484, 'grad_norm': 0.0032997822201777066, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:06<04:23, 3.66s/it] 86%|████████▋ | 449/520 [28:10<04:21, 3.68s/it] {'loss': 1.44, 'grad_norm': 0.0037110328779368495, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:10<04:21, 3.68s/it] 87%|████████▋ | 450/520 [28:14<04:20, 3.72s/it] {'loss': 1.3029, 'grad_norm': 0.0033913834653527862, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:14<04:20, 3.72s/it] 87%|████████▋ | 451/520 [28:17<04:18, 3.75s/it] {'loss': 1.3003, 'grad_norm': 0.0033687203674610676, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:17<04:18, 3.75s/it] 87%|████████▋ | 452/520 [28:21<04:16, 3.77s/it] {'loss': 1.4707, 'grad_norm': 0.003340962707222757, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:21<04:16, 3.77s/it] 87%|████████▋ | 453/520 [28:25<04:14, 3.80s/it] {'loss': 1.4439, 'grad_norm': 0.0035121024652189797, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:25<04:14, 3.80s/it] 87%|████████▋ | 454/520 [28:29<04:10, 3.80s/it] {'loss': 1.1965, 'grad_norm': 0.0034407348655970177, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:29<04:10, 3.80s/it] 88%|████████▊ | 455/520 [28:33<04:07, 3.80s/it] {'loss': 1.3469, 'grad_norm': 0.0033412390519655805, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:33<04:07, 3.80s/it] 88%|████████▊ | 456/520 [28:36<04:02, 3.79s/it] {'loss': 1.2479, 'grad_norm': 0.00407325879476863, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:36<04:02, 3.79s/it] 88%|████████▊ | 457/520 [28:40<03:56, 3.75s/it] {'loss': 1.4668, 'grad_norm': 0.0032458277685050393, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:40<03:56, 3.75s/it] 88%|████████▊ | 458/520 [28:44<03:51, 3.73s/it] {'loss': 1.4165, 'grad_norm': 0.003493359908123663, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:44<03:51, 3.73s/it] 88%|████████▊ | 459/520 [28:48<03:46, 3.72s/it] {'loss': 1.3206, 'grad_norm': 0.003149415791451138, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:48<03:46, 3.72s/it] 88%|████████▊ | 460/520 [28:51<03:41, 3.70s/it] {'loss': 1.1908, 'grad_norm': 0.003354226138969294, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:51<03:41, 3.70s/it] 89%|████████▊ | 461/520 [28:55<03:37, 3.69s/it] {'loss': 1.549, 'grad_norm': 0.0030640213572285784, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:55<03:37, 3.69s/it] 89%|████████▉ | 462/520 [28:59<03:33, 3.69s/it] {'loss': 1.5188, 'grad_norm': 0.003513507171523524, 'learning_rate': 
0.006464413110096601, 'epoch': 0.89}
+ 463/520 [29:02<03:29, 3.67s/it] {'loss': 1.1498, 'grad_norm': 0.003463029590737244, 'learning_rate': 0.006245754145600091, 'epoch': 0.89}
+ 464/520 [29:06<03:25, 3.67s/it] {'loss': 1.3198, 'grad_norm': 0.003539919651341116, 'learning_rate': 0.006030737921409169, 'epoch': 0.89}
+ 465/520 [29:10<03:22, 3.68s/it] {'loss': 1.4378, 'grad_norm': 0.003905377017452522, 'learning_rate': 0.005819372791793654, 'epoch': 0.89}
+ 466/520 [29:13<03:18, 3.67s/it] {'loss': 1.3008, 'grad_norm': 0.002928087991292121, 'learning_rate': 0.005611666969163243, 'epoch': 0.9}
+ 467/520 [29:17<03:14, 3.67s/it] {'loss': 1.3897, 'grad_norm': 0.0033485776035354934, 'learning_rate': 0.005407628523748398, 'epoch': 0.9}
+ 468/520 [29:20<03:10, 3.66s/it] {'loss': 1.2883, 'grad_norm': 0.0040890112257634705, 'learning_rate': 0.00520726538328683, 'epoch': 0.9}
+ 469/520 [29:24<03:05, 3.65s/it] {'loss': 1.3419, 'grad_norm': 0.003584602965022988, 'learning_rate': 0.005010585332715401, 'epoch': 0.9}
+ 470/520 [29:28<03:02, 3.65s/it] {'loss': 1.201, 'grad_norm': 0.0029800809425108575, 'learning_rate': 0.004817596013867765, 'epoch': 0.9}
+ 471/520 [29:31<02:58, 3.64s/it] {'loss': 1.2223, 'grad_norm': 0.0032733880442285673, 'learning_rate': 0.004628304925177318, 'epoch': 0.91}
+ 472/520 [29:35<02:55, 3.66s/it] {'loss': 1.1978, 'grad_norm': 0.0031876603963423423, 'learning_rate': 0.004442719421385921, 'epoch': 0.91}
+ 473/520 [29:39<02:51, 3.66s/it] {'loss': 1.2509, 'grad_norm': 0.00320577196471532, 'learning_rate': 0.004260846713258193, 'epoch': 0.91}
+ 474/520 [29:42<02:48, 3.66s/it] {'loss': 1.4245, 'grad_norm': 0.003188932559020111, 'learning_rate': 0.004082693867301224, 'epoch': 0.91}
+ 475/520 [29:46<02:45, 3.67s/it] {'loss': 1.3323, 'grad_norm': 0.003202463644919484, 'learning_rate': 0.003908267805490051, 'epoch': 0.91}
+ 476/520 [29:50<02:41, 3.67s/it] {'loss': 1.255, 'grad_norm': 0.003436271160255859, 'learning_rate': 0.003737575304998797, 'epoch': 0.92}
+ 477/520 [29:53<02:38, 3.68s/it] {'loss': 1.2346, 'grad_norm': 0.004037317223575373, 'learning_rate': 0.003570622997937234, 'epoch': 0.92}
+ 478/520 [29:57<02:34, 3.68s/it] {'loss': 1.195, 'grad_norm': 0.0032749968169378993, 'learning_rate': 0.00340741737109318, 'epoch': 0.92}
+ 479/520 [30:01<02:30, 3.68s/it] {'loss': 1.4189, 'grad_norm': 0.0036106574142114487, 'learning_rate': 0.003247964765680389, 'epoch': 0.92}
+ 480/520 [30:05<02:27, 3.69s/it] {'loss': 1.4179, 'grad_norm': 0.003276073214097853, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92}
+ 481/520 [30:08<02:23, 3.68s/it] {'loss': 1.4532, 'grad_norm': 0.003194807817222194, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93}
+ 482/520 [30:12<02:20, 3.69s/it] {'loss': 1.4419, 'grad_norm': 0.0035166575379340886, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93}
+ 483/520 [30:16<02:16, 3.68s/it] {'loss': 1.2807, 'grad_norm': 0.0036002637234769214, 'learning_rate': 0.002647806273887665, 'epoch': 0.93}
+ 484/520 [30:19<02:12, 3.69s/it] {'loss': 1.263, 'grad_norm': 0.0032882487696737207, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93}
+ 485/520 [30:23<02:09, 3.69s/it] {'loss': 1.2198, 'grad_norm': 0.0032677219069160114, 'learning_rate': 0.002370399288006664, 'epoch': 0.93}
+ 486/520 [30:27<02:05, 3.69s/it] {'loss': 1.3448, 'grad_norm': 0.003389002590596588, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93}
+ 487/520 [30:30<02:01, 3.69s/it] {'loss': 1.1915, 'grad_norm': 0.003084106829756111, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94}
+ 488/520 [30:34<01:58, 3.70s/it] {'loss': 1.1225, 'grad_norm': 0.003235810348961201, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94}
+ 489/520 [30:38<01:55, 3.74s/it] {'loss': 1.4292, 'grad_norm': 0.0029279238154983835, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94}
+ 490/520 [30:42<01:52, 3.77s/it] {'loss': 1.2672, 'grad_norm': 0.0034350709939085866, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94}
+ 491/520 [30:46<01:50, 3.79s/it] {'loss': 1.2223, 'grad_norm': 0.0034771198578903355, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94}
+ 492/520 [30:49<01:46, 3.82s/it] {'loss': 1.3482, 'grad_norm': 0.00342900206384076, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95}
+ 493/520 [30:53<01:43, 3.84s/it] {'loss': 1.5151, 'grad_norm': 0.003592917257066282, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95}
+ 494/520 [30:57<01:40, 3.86s/it] {'loss': 1.2869, 'grad_norm': 0.0032890249978586358, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95}
+ 495/520 [31:01<01:36, 3.87s/it] {'loss': 1.2272, 'grad_norm': 0.0031804693634701745, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95}
+ 496/520 [31:05<01:33, 3.88s/it] {'loss': 1.1555, 'grad_norm': 0.0034107330253163614, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95}
+ 497/520 [31:09<01:29, 3.88s/it] {'loss': 1.3432, 'grad_norm': 0.0029264723822100287, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96}
+ 498/520 [31:13<01:25, 3.88s/it] {'loss': 1.2353, 'grad_norm': 0.0034349823143660107, 'learning_rate': 0.000938800558694719, 'epoch': 0.96}
+ 499/520 [31:17<01:21, 3.88s/it] {'loss': 1.4981, 'grad_norm': 0.0033784024606133387, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96}
+ 500/520 [31:21<01:17, 3.88s/it] {'loss': 1.3561, 'grad_norm': 0.003856382405437518, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96}
+ 501/520 [31:24<01:13, 3.88s/it] {'loss': 1.426, 'grad_norm': 0.003985730889471056, 'learning_rate': 0.000700500077146038, 'epoch': 0.96}
+ 502/520 [31:28<01:09, 3.87s/it] {'loss': 1.2743, 'grad_norm': 0.003203609999595284, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97}
+ 503/520 [31:32<01:05, 3.87s/it] {'loss': 1.3796, 'grad_norm': 0.003370064411127467, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97}
+ 504/520 [31:36<01:02, 3.88s/it] {'loss': 1.2734, 'grad_norm': 0.003642533113895306, 'learning_rate': 0.000496922463459859, 'epoch': 0.97}
+ 505/520 [31:40<00:58, 3.87s/it] {'loss': 1.3296, 'grad_norm': 0.0034906518000717424, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97}
+ 506/520 [31:44<00:54, 3.88s/it] {'loss': 1.2246, 'grad_norm': 0.003584237950470186, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97}
+ 507/520 [31:48<00:50, 3.92s/it] {'loss': 1.5527, 'grad_norm': 0.0033001927882109817, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97}
+ 508/520 [31:52<00:46, 3.91s/it] {'loss': 1.3577, 'grad_norm': 0.003273407847442535, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98}
+ 509/520 [31:56<00:43, 3.92s/it] {'loss': 1.3125, 'grad_norm': 0.003219912413230537, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98}
+ 510/520 [32:00<00:39, 3.91s/it] {'loss': 1.2701, 'grad_norm': 0.0032668790398913197, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98}
+ 511/520 [32:03<00:35, 3.90s/it] {'loss': 1.2408, 'grad_norm': 0.0030292012256916763, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98}
+ 512/520 [32:07<00:31, 3.91s/it] {'loss': 1.1197, 'grad_norm': 0.003180785081441138, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98}
+ 513/520 [32:11<00:27, 3.91s/it] {'loss': 1.3378, 'grad_norm': 0.0036672063492688874, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99}
+ 514/520 [32:15<00:23, 3.92s/it] {'loss': 1.3126, 'grad_norm': 0.0029770745969938247, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99}
+ 515/520 [32:19<00:19, 3.91s/it] {'loss': 1.3727, 'grad_norm': 0.0039513402268834185, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99}
+ 516/520 [32:23<00:15, 3.92s/it] {'loss': 1.2373, 'grad_norm': 0.0032797038543554155, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99}
+ 517/520 [32:27<00:11, 3.91s/it] {'loss': 1.4611, 'grad_norm': 0.0032950070588581544, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99}
+ 518/520 [32:31<00:07, 3.89s/it] {'loss': 1.2778, 'grad_norm': 0.0035134455773048376, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0}
+ 519/520 [32:35<00:03, 3.88s/it] {'loss': 1.3974, 'grad_norm': 0.0033941708985170227, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0}
+ 520/520 [32:39<00:00, 4.14s/it] {'loss': 1.4973, 'grad_norm': 0.0035634399645952386, 'learning_rate': 0.0, 'epoch': 1.0}
+ {'train_runtime': 1959.9013, 'train_samples_per_second': 33.945, 'train_steps_per_second': 0.265, 'train_loss': 1.4841521593240592, 'epoch': 1.0}
+ 520/520 [32:39<00:00, 3.77s/it]
+[2025-10-10 07:24:17,650] [INFO] [launch.py:348:main] Process 587484 exits successfully.
+[2025-10-10 07:24:17,650] [INFO] [launch.py:348:main] Process 587481 exits successfully.
+[2025-10-10 07:24:17,651] [INFO] [launch.py:348:main] Process 587485 exits successfully.
+[2025-10-10 07:24:18,652] [INFO] [launch.py:348:main] Process 587483 exits successfully.
+[2025-10-10 07:24:18,653] [INFO] [launch.py:348:main] Process 587482 exits successfully.
+[2025-10-10 07:24:18,653] [INFO] [launch.py:348:main] Process 587487 exits successfully.
+[2025-10-10 07:24:18,654] [INFO] [launch.py:348:main] Process 587486 exits successfully.
+[2025-10-10 07:24:22,659] [INFO] [launch.py:348:main] Process 587480 exits successfully.
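The per-step records above are Python dict literals interleaved with tqdm progress output, so the loss and learning-rate curves can be recovered mechanically. The snippet below is a minimal sketch, not part of this repo; the regex assumes the exact record shape shown above, and the path in the usage comment is a placeholder for one of the committed logs.

```python
import ast
import re

# Minimal sketch (not part of this repo): recover per-step training records
# from a log like the ones above. Assumes each record is a dict literal of
# the shape {'loss': ..., 'grad_norm': ..., 'learning_rate': ..., 'epoch': ...}.
RECORD = re.compile(r"\{'loss':[^{}]*\}")

def parse_log(path: str) -> list[dict]:
    with open(path, errors="replace") as f:
        text = f.read()
    records = []
    for match in RECORD.finditer(text):
        try:
            records.append(ast.literal_eval(match.group(0)))
        except (ValueError, SyntaxError):
            pass  # skip records mangled by tqdm carriage returns
    return records

# Hypothetical usage; the path is a placeholder for a log in logs_oct10/:
# losses = [r['loss'] for r in parse_log('logs_oct10/some_run.log')]
```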
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.5_2e-1_connector-1.0_0.5_2e-1_ablation_20251010_064225.log +Timestamp: 2025-10-10 07:24:25 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060116.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060116.log new file mode 100644 index 0000000000000000000000000000000000000000..fcf29d05e7beedfd863bc987ca57040993f520ce --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060116.log @@ -0,0 +1,11 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060116.log +Timestamp: 2025-10-10 06:01:16 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:01:19,451] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:01:22,190] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:01:22,192] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. 
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.7 --temperature_mlp_text 0.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.7 --temperature_mlp_vision 0.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060440.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060440.log new file mode 100644 index 0000000000000000000000000000000000000000..57c9e086a541e7241e163e203be7cbebe197c561 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060440.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060440.log +Timestamp: 2025-10-10 06:04:40 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:04:43,242] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:45,973] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. 
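For reference, the learning-rate values recorded in the completed run above follow the schedule requested by `--lr_scheduler_type cosine --warmup_ratio 0.03 --learning_rate 2e-1`. The sketch below assumes the standard Hugging Face Transformers cosine-with-warmup formula (ceil-rounded warmup steps); it is an illustration, not code from this repo, but it reproduces the logged trace for the 520-step run.

```python
import math

# Cosine-with-warmup schedule implied by the launch flags above
# (--learning_rate 2e-1, --warmup_ratio 0.03, --lr_scheduler_type cosine).
def lr_at(step: int, total: int = 520, base: float = 2e-1,
          warmup_ratio: float = 0.03) -> float:
    warmup = math.ceil(total * warmup_ratio)  # linear warmup phase
    if step < warmup:
        return base * step / max(1, warmup)
    progress = (step - warmup) / max(1, total - warmup)
    return base * 0.5 * (1.0 + math.cos(math.pi * progress))

# lr_at(513) ~= 9.52e-05 and lr_at(520) == 0.0, matching the logged trace.
```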
+[2025-10-10 06:04:45,975] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.7 --temperature_mlp_text 0.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.7 --temperature_mlp_vision 0.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:04:48,551] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:49,648] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:04:49,648] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:04:49,648] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:04:49,648] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:04:49,648] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:04:49,648] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:04:49,648] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:04:49,650] [INFO] [launch.py:253:main] process 558847 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:04:49,652] [INFO] [launch.py:253:main] process 558848 spawned with 
command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:04:49,654] [INFO] [launch.py:253:main] process 558849 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:04:49,656] [INFO] [launch.py:253:main] process 558850 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', 
'--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:04:49,658] [INFO] [launch.py:253:main] process 558851 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:04:49,660] [INFO] [launch.py:253:main] process 558852 spawned with command: 
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:04:49,662] [INFO] [launch.py:253:main] process 558853 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', 
'--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:04:49,664] [INFO] [launch.py:253:main] process 558854 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', 
'--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:04:56,361] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:56,664] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:56,672] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:56,673] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:56,704] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:56,721] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:56,729] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:56,734] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:04:56,769] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:04:57,071] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:04:57,071] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:04:57,072] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:04:57,072] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 06:04:57,114] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:04:57,118] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:04:57,135] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:04:57,135] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.7, 'temperature_mlp': 0.7, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.7, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.7, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.7, + "temperature_mlp": 0.7, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + 
"intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:558847:558847 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558847:558847 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558847:558847 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:558847:558847 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:558847:558847 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:558847:558847 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+ywang29-vrdb-test2-worker-0:558848:558848 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:558848:558848 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558851:558851 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:558848:558848 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558851:558851 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558851:558851 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558848:558848 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:558848:558848 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:558848:558848 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:558851:558851 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:558851:558851 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:558851:558851 [4] NCCL INFO NET/Plugin: Using internal network plugin. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Using network Socket +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:558853:558853 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:558853:558853 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558853:558853 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558853:558853 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:558853:558853 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:558853:558853 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:558852:558852 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:558852:558852 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558852:558852 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558852:558852 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:558852:558852 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:558852:558852 [5] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test2-worker-0:558850:558850 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:558850:558850 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558850:558850 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558850:558850 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:558850:558850 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:558850:558850 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test2-worker-0:558854:558854 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:558854:558854 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558854:558854 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558854:558854 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:558854:558854 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:558854:558854 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:558849:558849 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:558849:558849 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558849:558849 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558849:558849 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:558849:558849 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:558849:558849 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO ncclCommInitRank comm 0x5648d67d0c20 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x87695d3da880bf58 - Init START +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO ncclCommInitRank comm 0x556402c2ecd0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x87695d3da880bf58 - Init START +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO ncclCommInitRank comm 0x55d15139ef60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x87695d3da880bf58 - Init START +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO ncclCommInitRank comm 0x55c928e655c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x87695d3da880bf58 - Init START +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO ncclCommInitRank comm 0x56151c128810 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x87695d3da880bf58 - Init START +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO ncclCommInitRank comm 0x560c8c1626d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x87695d3da880bf58 - Init START +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO ncclCommInitRank comm 0x556dcf65ce20 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x87695d3da880bf58 - Init START +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO ncclCommInitRank comm 0x55bf0a5ab1c0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x87695d3da880bf58 - Init START +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO NVLS multicast support is not available on 
dev 6 +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO comm 0x560c8c1626d0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO comm 0x556402c2ecd0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO comm 0x5648d67d0c20 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO comm 0x56151c128810 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO comm 0x55d15139ef60 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO comm 0x55c928e655c0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO comm 0x55bf0a5ab1c0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO comm 0x556dcf65ce20 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO 
Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO 
Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Connected all rings 
+ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:558852:560506 [5] NCCL INFO ncclCommInitRank comm 0x55d15139ef60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x87695d3da880bf58 - Init COMPLETE +ywang29-vrdb-test2-worker-0:558854:560511 [7] NCCL INFO ncclCommInitRank comm 0x560c8c1626d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x87695d3da880bf58 - Init COMPLETE +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:558853:560505 [6] NCCL INFO ncclCommInitRank comm 0x5648d67d0c20 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x87695d3da880bf58 - Init COMPLETE +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:558849:560531 [2] NCCL INFO ncclCommInitRank comm 0x556402c2ecd0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x87695d3da880bf58 - Init COMPLETE +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:558848:560486 [1] NCCL INFO ncclCommInitRank comm 0x56151c128810 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x87695d3da880bf58 - Init COMPLETE +ywang29-vrdb-test2-worker-0:558847:560485 [0] NCCL INFO ncclCommInitRank comm 0x55c928e655c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x87695d3da880bf58 - Init COMPLETE +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:558850:560510 [3] NCCL INFO ncclCommInitRank comm 0x556dcf65ce20 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x87695d3da880bf58 - Init COMPLETE +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:558851:560487 [4] NCCL INFO ncclCommInitRank comm 0x55bf0a5ab1c0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x87695d3da880bf58 - Init COMPLETE
+[2025-10-10 06:05:38,164] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores',
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[... the identical "Some weights of Qwen2ForCausalLM were not initialized" warning and the loading messages below are emitted once per rank; the seven further verbatim copies are omitted ...]
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 06:05:39,987] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
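[Note: every `*.scores` tensor is reported above as newly initialized because the pretrained checkpoint contains only the plain Linear weights; the mask scores are created fresh for mask-tuning. The sketch below is an illustration only -- the class and names are assumptions, not the repo's actual SupermaskLinearSparsity_SoftForward_Normal implementation -- of a soft-forward masked linear layer that would trigger exactly this warning and, with init_mean=1.0 (cf. --init_mean_* 1.0 in the launch command), the "Mean=1.000000" report printed further below.]

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftMaskedLinear(nn.Linear):
        """Hypothetical supermask-style layer (illustrative, not the repo's code).

        A learnable per-weight `scores` tensor is added on top of nn.Linear.
        Since `scores` is absent from a vanilla pretrained checkpoint,
        from_pretrained() flags it as "newly initialized".
        """

        def __init__(self, in_features, out_features, bias=True,
                     init_mean=1.0, temperature=0.3):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # Constant init at init_mean gives a score mean of exactly 1.000000
            # before any training step.
            self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

        def forward(self, x):
            # "Soft forward": scale each weight by sigmoid(scores / T) in (0, 1).
            # At init, sigmoid(1.0 / 0.3) is roughly 0.97, so the layer starts
            # close to the unmasked pretrained behaviour.
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)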
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) +
(out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000 +Pre-training init connector._connector.0.scores: Mean=1.000005 +Pre-training init connector._connector.2.scores: Mean=0.999970
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
+    train()
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train
+    data_module = make_supervised_data_module(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module
+    train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__
+    list_data_dict = json.load(open(data_path, "r"))
+FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json'
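The failure here is a data-path misconfiguration rather than a modeling error: this launch passed --data_path /root/dataset/text_files/llava_v1_5_mix665k.json (visible in the launcher command below), that path does not exist on the node, and LazySupervisedDataset.__init__ opens it with a bare json.load(open(data_path, "r")), so every rank raises the same FileNotFoundError and the launcher kills all eight subprocesses. A hypothetical fail-fast guard, illustrative only and not the repo's code, could surface the bad path with an actionable message before DeepSpeed spawns workers:

    import json
    import os
    import sys

    def load_annotations(data_path: str) -> list:
        # Hypothetical pre-flight check for the annotation JSON: exit with a
        # clear message when --data_path is wrong or the dataset mount is
        # missing, instead of a bare FileNotFoundError on every rank.
        if not os.path.isfile(data_path):
            sys.exit(
                f"--data_path not found on this node: {data_path!r}. "
                "Check that the dataset volume is mounted and that the "
                "launch script points at it."
            )
        with open(data_path, "r") as f:
            return json.load(f)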
+[2025-10-10 06:05:42,720] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 558847
+[2025-10-10 06:05:42,853] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 558848
+[2025-10-10 06:05:43,027] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 558849
+[2025-10-10 06:05:43,028] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 558850
+[2025-10-10 06:05:43,028] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 558851
+[2025-10-10 06:05:43,029] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 558852
+[2025-10-10 06:05:43,030] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 558853
+[2025-10-10 06:05:43,031] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 558854
+[2025-10-10 06:05:43,032] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_060440.log +Timestamp: 2025-10-10 06:05:44 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_072425.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_072425.log new file mode 100644 index 0000000000000000000000000000000000000000..1b79589f67669f41e0022190322a42160c2d8e7e --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_072425.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_072425.log +Timestamp: 2025-10-10 07:24:25 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 07:24:27,793] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:31,131] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 07:24:31,133] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. 
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.7 --temperature_mlp_text 0.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.7 --temperature_mlp_vision 0.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 07:24:33,756] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:34,871] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 07:24:34,871] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 07:24:34,871] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 07:24:34,872] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 07:24:34,872] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 07:24:34,872] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 07:24:34,872] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 07:24:34,874] [INFO] [launch.py:253:main] process 619193 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', 
'--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:34,876] [INFO] [launch.py:253:main] process 619194 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', 
'--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:34,878] [INFO] [launch.py:253:main] process 619195 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:34,880] [INFO] [launch.py:253:main] process 619196 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', 
'--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:34,882] [INFO] [launch.py:253:main] process 619197 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', 
'--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:34,884] [INFO] [launch.py:253:main] process 619198 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', 
'1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:34,885] [INFO] [launch.py:253:main] process 619199 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:34,887] [INFO] [launch.py:253:main] process 619200 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', 
'/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.7', '--temperature_mlp_text', '0.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.7', '--temperature_mlp_vision', '0.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 07:24:41,452] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 07:24:41,834] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 07:24:41,841] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 07:24:41,841] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 07:24:41,861] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 07:24:41,877] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 07:24:41,877] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 07:24:41,877] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 07:24:41,878] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 07:24:42,242] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 07:24:42,246] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 07:24:42,247] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 07:24:42,280] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 07:24:42,280] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 07:24:42,281] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 07:24:42,282] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 07:24:42,282] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.7, 'temperature_mlp': 0.7, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.7, 'mask_type': 'soft', 'backward_type': 'normal'}}
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.7,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.7,
+    "temperature_mlp": 0.7,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:619193:619193 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619193:619193 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619193:619193 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:619193:619193 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:619193:619193 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:619193:619193 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test2-worker-0:619200:619200 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:619200:619200 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619200:619200 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619200:619200 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:619200:619200 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:619200:619200 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:619196:619196 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:619196:619196 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619196:619196 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619196:619196 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:619196:619196 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:619196:619196 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:619197:619197 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:619197:619197 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619197:619197 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619197:619197 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:619197:619197 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:619197:619197 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:619199:619199 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:619199:619199 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619199:619199 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619199:619199 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:619199:619199 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:619199:619199 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:619194:619194 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:619194:619194 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619194:619194 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619194:619194 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:619194:619194 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:619194:619194 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:619195:619195 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:619195:619195 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619195:619195 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619195:619195 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:619195:619195 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:619195:619195 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:619198:619198 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:619198:619198 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619198:619198 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619198:619198 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:619198:619198 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:619198:619198 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO ncclCommInitRank comm 0x563d19083070 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xabf8580c5162c45f - Init START
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO ncclCommInitRank comm 0x561f0ab66a30 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xabf8580c5162c45f - Init START
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO ncclCommInitRank comm 0x55f2cca83f80 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xabf8580c5162c45f - Init START
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO ncclCommInitRank comm 0x556f26abd000 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xabf8580c5162c45f - Init START
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO ncclCommInitRank comm 0x5587299f3f40 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xabf8580c5162c45f - Init START
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO ncclCommInitRank comm 0x557b3c54eb50 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xabf8580c5162c45f - Init START
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO ncclCommInitRank comm 0x562abc318740 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xabf8580c5162c45f - Init START
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO ncclCommInitRank comm 0x557fd1bf9400 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xabf8580c5162c45f - Init START
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO NVLS multicast support is not available on dev 5
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO NVLS multicast support is not available on dev 7
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO NVLS multicast support is not available on dev 3
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO NVLS multicast support is not available on dev 1
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO NVLS multicast support is not available on dev 4
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO NVLS multicast support is not available on dev 6
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO NVLS multicast support is not available on dev 2
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO comm 0x561f0ab66a30 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO comm 0x5587299f3f40 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO comm 0x557b3c54eb50 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO comm 0x557fd1bf9400 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO comm 0x562abc318740 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO comm 0x556f26abd000 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO comm 0x563d19083070 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO comm 0x55f2cca83f80 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO Trees [0-23] 3/-1/-1->2->1
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO Trees [0-23] 2/-1/-1->1->0
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Trees [0-23] 7/-1/-1->6->5
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Trees [0-23] 6/-1/-1->5->4
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO Trees [0-23] -1/-1/-1->7->6
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Trees [0-23] 5/-1/-1->4->3
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO Trees [0-23] 4/-1/-1->3->2
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO Trees [0-23] 1/-1/-1->0->-1
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO Channel 00-23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO Channel 00-23/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 00-23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO Channel 00-23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO Channel 00-23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 00-23/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 00-23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO Channel 00-23/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO Channel 00-23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO Channel 00-23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO Channel 00-23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO Channel 00-23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO Channel 00-23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 00-13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 00-12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 00-04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:619197:620828 [4] 
NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:619200:620826 [7] NCCL INFO ncclCommInitRank comm 0x557b3c54eb50 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xabf8580c5162c45f - Init COMPLETE +ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:619199:620829 [6] NCCL INFO ncclCommInitRank comm 0x562abc318740 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xabf8580c5162c45f - Init COMPLETE +ywang29-vrdb-test2-worker-0:619197:620828 [4] NCCL INFO ncclCommInitRank comm 0x563d19083070 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xabf8580c5162c45f - Init COMPLETE +ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:619196:620827 [3] NCCL INFO ncclCommInitRank comm 0x55f2cca83f80 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xabf8580c5162c45f - Init COMPLETE +ywang29-vrdb-test2-worker-0:619195:620831 [2] NCCL INFO ncclCommInitRank comm 0x561f0ab66a30 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xabf8580c5162c45f - Init COMPLETE +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:619198:620832 [5] NCCL INFO ncclCommInitRank comm 0x556f26abd000 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xabf8580c5162c45f - Init COMPLETE +ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:619194:620830 [1] NCCL INFO ncclCommInitRank comm 0x5587299f3f40 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xabf8580c5162c45f - Init COMPLETE +ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
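The NCCL lines above are routine single-node initialization chatter: each of the eight ranks opens 24 ring channels to its neighbors over P2P/CUMEM, connects the tree topology, fails to find an external tuner plugin (harmless, it falls back to the internal tuner), and reports Init COMPLETE for its communicator. A minimal sketch of how output like this is produced, assuming a single 8-GPU node launched with torchrun (the script name and tensor shape are illustrative, not part of the run):

```python
# Hedged sketch: reproduce NCCL INFO logs like the ones above by enabling
# NCCL_DEBUG before initializing the process group.
# Launch with: NCCL_DEBUG=INFO torchrun --nproc_per_node=8 nccl_debug_sketch.py
import os

import torch
import torch.distributed as dist


def main() -> None:
    # torchrun sets RANK, LOCAL_RANK, and WORLD_SIZE; NCCL picks up
    # NCCL_DEBUG=INFO from the environment and prints the channel/ring/tree
    # setup seen in the log.
    dist.init_process_group(backend="nccl")
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)

    # A single all-reduce is enough to force communicator initialization
    # (the "ncclCommInitRank ... Init COMPLETE" lines).
    x = torch.ones(1, device=f"cuda:{local_rank}")
    dist.all_reduce(x)
    dist.destroy_process_group()


if __name__ == "__main__":
    main()
```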
+ywang29-vrdb-test2-worker-0:619193:620825 [0] NCCL INFO ncclCommInitRank comm 0x557fd1bf9400 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xabf8580c5162c45f - Init COMPLETE
+[2025-10-10 07:25:27,654] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: [condensed: 'model.layers.{0-23}.mlp.{down_proj,gate_proj,up_proj}.scores' and 'model.layers.{0-23}.self_attn.{k_proj,o_proj,q_proj,v_proj}.scores', 168 tensors in all]
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
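The warning above is expected for this run rather than a bug: the pretrain checkpoint holds only the base Qwen2 weights, while this model variant attaches a `scores` tensor to every q/k/v/o and gate/up/down projection across all 24 layers, so those tensors start from their initializer instead of the checkpoint. As a rough, hypothetical sketch of what a soft score mask on a single projection could look like (the class, the sigmoid gating, and the init_mean/temperature defaults are assumptions, not the repository's actual code):

```python
# Hedged sketch, not the repo's implementation: a linear layer carrying one
# learnable score per weight, turned into a soft mask at forward time.
import torch
import torch.nn as nn
import torch.nn.functional as F


class SoftMaskedLinear(nn.Linear):
    """Linear layer whose weights are gated by sigmoid(scores / temperature)."""

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # One score per weight, initialized to a constant mean; trainable,
        # which is why these tensors cannot come from the base checkpoint.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)
```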
+[... the identical newly-initialized-scores warning and "You should probably TRAIN this model..." notice repeated verbatim by the remaining ranks elided ...]
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[... the same "loading language model from ..." line and further verbatim copies of the scores warning from the other ranks elided ...]
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 07:32:28,149] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... + +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
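The "newly initialized" block above is expected rather than a sign of a broken checkpoint: the mask-tuning wrapper registers an extra `scores` tensor on every masked projection, and those tensors do not exist in the pretrained checkpoint, so Transformers leaves them at their fresh initialization and warns about them. A minimal sketch of the mechanism, with hypothetical names that are not the project's actual code:

    import torch
    import torch.nn as nn

    # A linear layer that, like the masked projections in this run, carries an
    # extra per-weight "scores" parameter on top of the pretrained weight.
    class ScoredLinear(nn.Linear):
        def __init__(self, in_features, out_features, bias=True):
            super().__init__(in_features, out_features, bias=bias)
            self.scores = nn.Parameter(torch.ones_like(self.weight))

    plain = nn.Linear(896, 896)       # stands in for the pretrained checkpoint
    scored = ScoredLinear(896, 896)   # stands in for the mask-tuning model

    # Loading the checkpoint leaves `scores` at its fresh initialization;
    # strict=False reports it as a missing key, which is exactly what the
    # "newly initialized" warning above surfaces.
    result = scored.load_state_dict(plain.state_dict(), strict=False)
    print(result.missing_keys)  # ['scores']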
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
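As the printout shows, every q/k/v/o and gate/up/down projection in the language model, plus both connector layers, has been swapped for a SupermaskLinearSparsity_SoftForward_Normal module, while the SigLIP vision tower keeps plain Linear layers. The log does not include that class's source; what follows is a plausible minimal sketch of a soft-forward supermask layer, using the score mean of 1.0 and temperature of 0.3 configured for this run (all names and implementation details here are assumptions, not the project's actual code):

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftSupermaskLinear(nn.Linear):
        """Frozen pretrained weight modulated by a trainable soft mask."""

        def __init__(self, in_features, out_features, bias=True,
                     init_mean=1.0, temperature=0.3):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # One score per weight entry, initialized around init_mean; for
            # tensors this large the per-tensor mean comes out as ~1.000000,
            # matching the "Pre-training init" lines below.
            self.scores = nn.Parameter(
                init_mean + 0.01 * torch.randn_like(self.weight))
            # Only the scores are trained; the pretrained weight stays frozen.
            self.weight.requires_grad_(False)
            if self.bias is not None:
                self.bias.requires_grad_(False)

        def forward(self, x):
            # Soft mask in (0, 1); lower temperature sharpens it toward 0/1.
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)

Under this reading, the initial mask value is sigmoid(1.0 / 0.3) ≈ 0.97, so training starts close to the dense pretrained network and gradient descent on the scores alone then sharpens the mask.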
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000
+Pre-training init connector._connector.0.scores: Mean=1.000005
+Pre-training init connector._connector.2.scores: Mean=0.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-10 07:32:46,197 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-10 07:32:46,205 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
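The setup numbers above are internally consistent. A quick check of the trainable-parameter total and the 520-step epoch, assuming a global batch of 128, i.e. 8 data-parallel ranks × 4 per-device samples × 4 gradient-accumulation steps as configured at launch:

    import math

    # 24 decoder layers, each with 7 masked projections:
    # q/o are 896x896, k/v are 896x128, gate/up/down are 896x4864.
    per_layer = 2 * 896 * 896 + 2 * 896 * 128 + 3 * 896 * 4864
    llm_scores = 24 * per_layer
    connector_scores = 1152 * 896 + 896 * 896
    assert llm_scores + connector_scores == 359661568  # matches the log

    # One epoch over the 66529 sampled examples with a global batch of 128.
    steps = math.ceil(66529 / (8 * 4 * 4))
    assert steps == 520                                # matches "0/520"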
+ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7]
-1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL 
INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 10/0 
: 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 18/0 : 2[2] 
-> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 
[3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO 
Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 11/0 : 
6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO Connected all 
trees +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:619200:626135 [7] NCCL INFO ncclCommInitRank comm 0x7f0dc40698c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x40ee01b089d9251d - Init COMPLETE +ywang29-vrdb-test2-worker-0:619198:626139 [5] NCCL INFO ncclCommInitRank comm 0x7f7b3806ad80 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x40ee01b089d9251d - Init COMPLETE +ywang29-vrdb-test2-worker-0:619196:626137 [3] NCCL INFO ncclCommInitRank comm 0x7f3ed406a6c0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x40ee01b089d9251d - Init COMPLETE +ywang29-vrdb-test2-worker-0:619195:626133 [2] NCCL INFO ncclCommInitRank comm 0x7f7eec06ac20 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x40ee01b089d9251d - Init COMPLETE +ywang29-vrdb-test2-worker-0:619194:626134 [1] NCCL INFO ncclCommInitRank comm 0x7f540006aa00 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x40ee01b089d9251d - Init COMPLETE +ywang29-vrdb-test2-worker-0:619199:626138 [6] NCCL INFO ncclCommInitRank comm 0x7f1d8006afc0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x40ee01b089d9251d - Init COMPLETE +ywang29-vrdb-test2-worker-0:619193:626132 [0] NCCL INFO ncclCommInitRank comm 0x7f1fac06af60 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x40ee01b089d9251d - Init COMPLETE +ywang29-vrdb-test2-worker-0:619197:626136 [4] NCCL INFO ncclCommInitRank comm 0x7fd8a806aae0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x40ee01b089d9251d - Init COMPLETE + 0%| | 1/520 [00:14<2:01:17, 14.02s/it] {'loss': 3.1438, 'grad_norm': 0.5046490574418507, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:01:17, 14.02s/it] 0%| | 2/520 [00:17<1:08:53, 7.98s/it] {'loss': 2.9309, 'grad_norm': 0.44068877095939235, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:17<1:08:53, 7.98s/it] 1%| | 3/520 [00:21<52:01, 6.04s/it] {'loss': 2.038, 'grad_norm': 0.11472077563510909, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:21<52:01, 6.04s/it] 1%| | 4/520 [00:25<44:05, 5.13s/it] {'loss': 2.0561, 'grad_norm': 0.08674251203199532, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<44:05, 5.13s/it] 1%| | 5/520 [00:28<39:38, 4.62s/it] {'loss': 1.9654, 'grad_norm': 0.055395244279068286, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:28<39:38, 4.62s/it] 1%| | 6/520 [00:32<36:48, 4.30s/it] {'loss': 1.8475, 'grad_norm': 0.03536555821323761, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:32<36:48, 4.30s/it] 1%|▏ | 7/520 [00:36<34:59, 4.09s/it] {'loss': 1.8167, 'grad_norm': 0.028695499539029442, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:36<34:59, 4.09s/it] 2%|▏ | 8/520 [00:40<35:31, 4.16s/it] {'loss': 1.8272, 'grad_norm': 0.022860107961730856, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:40<35:31, 4.16s/it] 2%|▏ | 9/520 [00:44<35:29, 4.17s/it] {'loss': 1.8429, 'grad_norm': 0.02028852378410791, 'learning_rate': 0.1125, 
+  2%|▏ | 10/520 [00:48<34:05, 4.01s/it] {'loss': 1.628, 'grad_norm': 0.019762026220394954, 'learning_rate': 0.125, 'epoch': 0.02}
+  2%|▏ | 11/520 [00:52<33:27, 3.94s/it] {'loss': 1.7906, 'grad_norm': 0.020590906837962695, 'learning_rate': 0.1375, 'epoch': 0.02}
+  2%|▏ | 12/520 [00:55<32:39, 3.86s/it] {'loss': 1.8437, 'grad_norm': 0.022454928760372736, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-10 07:33:50,816] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+  2%|▎ | 13/520 [01:00<33:49, 4.00s/it] {'loss': 1.7693, 'grad_norm': 0.018748414788282368, 'learning_rate': 0.1625, 'epoch': 0.03}
+  3%|▎ | 14/520 [01:03<32:55, 3.90s/it] {'loss': 1.8587, 'grad_norm': 0.02683342731645301, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+  3%|▎ | 15/520 [01:07<32:28, 3.86s/it] {'loss': 2.083, 'grad_norm': 0.048827796742094504, 'learning_rate': 0.1875, 'epoch': 0.03}
+  3%|▎ | 16/520 [01:11<31:52, 3.79s/it] {'loss': 2.4165, 'grad_norm': 0.09558524259913992, 'learning_rate': 0.2, 'epoch': 0.03}
+  3%|▎ | 17/520 [01:14<31:30, 3.76s/it] {'loss': 2.4451, 'grad_norm': 0.07777235386578758, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+  3%|▎ | 18/520 [01:18<31:10, 3.73s/it] {'loss': 2.0309, 'grad_norm': 0.09487180301782494, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+  4%|▎ | 19/520 [01:22<30:58, 3.71s/it] {'loss': 2.486, 'grad_norm': 0.042416804122188186, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+  4%|▍ | 20/520 [01:26<30:57, 3.72s/it] {'loss': 2.201, 'grad_norm': 0.05653647911108526, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+  4%|▍ | 21/520 [01:29<31:00, 3.73s/it] {'loss': 2.9608, 'grad_norm': 0.09677381652054755, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+  4%|▍ | 22/520 [01:33<30:53, 3.72s/it] {'loss': 2.8671, 'grad_norm': 0.12172486182776944, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+  4%|▍ | 23/520 [01:37<31:18, 3.78s/it] {'loss': 2.3002, 'grad_norm': 0.0383988453454006, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+  5%|▍ | 24/520 [01:41<31:29, 3.81s/it] {'loss': 2.7027, 'grad_norm': 0.03767364325891248, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+  5%|▍ | 25/520 [01:45<31:32, 3.82s/it] {'loss': 2.2058, 'grad_norm': 0.02247631052313038, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+  5%|▌ | 26/520 [01:49<31:33, 3.83s/it] {'loss': 2.1305, 'grad_norm': 0.01463156196090105, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
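
Editor's sketch, not part of the log: the DeepSpeed stage3 warning above suggests calling get_accelerator().empty_cache() in the training loop so all ranks flush the allocator cache at the same step. A minimal illustration of that remedy follows; the loop structure and the empty_cache_every parameter are hypothetical, and only get_accelerator().empty_cache() comes from the warning text itself.

    # Hypothetical training-loop skeleton showing the warning's suggested fix.
    from deepspeed.accelerator import get_accelerator

    def train(engine, dataloader, empty_cache_every=50):
        for step, batch in enumerate(dataloader):
            loss = engine(batch)      # forward pass (schematic)
            engine.backward(loss)     # DeepSpeed-managed backward
            engine.step()             # optimizer step
            # Flush the CUDA caching allocator on every rank at the same
            # step, so no rank flushes alone under memory pressure while
            # the others wait in a collective.
            if step % empty_cache_every == 0:
                get_accelerator().empty_cache()
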
+  5%|▌ | 27/520 [01:52<31:30, 3.83s/it] {'loss': 1.9979, 'grad_norm': 0.01740528913995654, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+  5%|▌ | 28/520 [01:56<31:32, 3.85s/it] {'loss': 1.8795, 'grad_norm': 0.01103028336626142, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+  6%|▌ | 29/520 [02:00<31:33, 3.86s/it] {'loss': 1.9461, 'grad_norm': 0.01799647014812498, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+  6%|▌ | 30/520 [02:04<31:33, 3.86s/it] {'loss': 2.7851, 'grad_norm': 0.09528926140930072, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+  6%|▌ | 31/520 [02:08<31:32, 3.87s/it] {'loss': 2.0428, 'grad_norm': 0.022846754063781435, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+  6%|▌ | 32/520 [02:12<31:30, 3.87s/it] {'loss': 3.9798, 'grad_norm': 0.11691763481079272, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+  6%|▋ | 33/520 [02:16<31:24, 3.87s/it] {'loss': 2.5715, 'grad_norm': 0.11544869992246752, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+  7%|▋ | 34/520 [02:19<31:19, 3.87s/it] {'loss': 2.1188, 'grad_norm': 0.03617772327120424, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+  7%|▋ | 35/520 [02:23<31:13, 3.86s/it] {'loss': 2.0874, 'grad_norm': 0.026662099535834934, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+  7%|▋ | 36/520 [02:27<31:14, 3.87s/it] {'loss': 2.1872, 'grad_norm': 0.013091576829049236, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+  7%|▋ | 37/520 [02:31<31:08, 3.87s/it] {'loss': 2.6993, 'grad_norm': 0.029046322258259424, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+  7%|▋ | 38/520 [02:35<31:01, 3.86s/it] {'loss': 2.2565, 'grad_norm': 0.019473168406675255, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+  8%|▊ | 39/520 [02:39<30:51, 3.85s/it] {'loss': 1.9604, 'grad_norm': 0.012551587777480673, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+  8%|▊ | 40/520 [02:43<30:42, 3.84s/it] {'loss': 1.9906, 'grad_norm': 0.019723596328411013, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+  8%|▊ | 41/520 [02:46<30:40, 3.84s/it] {'loss': 1.942, 'grad_norm': 0.010460251156699329, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+  8%|▊ | 42/520 [02:50<30:35, 3.84s/it] {'loss': 2.0036, 'grad_norm': 0.011398014774541842, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+  8%|▊ | 43/520 [02:54<30:34, 3.85s/it] {'loss': 2.4885, 'grad_norm': 0.017609425858166695, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+  8%|▊ | 44/520 [02:58<30:42, 3.87s/it] {'loss': 2.5368, 'grad_norm': 0.020983695094876423, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+  9%|▊ | 45/520 [03:02<30:13, 3.82s/it] {'loss': 1.9282, 'grad_norm': 0.011725500176190994, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+  9%|▉ | 46/520 [03:05<29:48, 3.77s/it] {'loss': 2.5365, 'grad_norm': 0.017799567062701766, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+  9%|▉ | 47/520 [03:09<29:23, 3.73s/it] {'loss': 1.9242, 'grad_norm': 0.010069400168972594, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+  9%|▉ | 48/520 [03:13<29:01, 3.69s/it] {'loss': 1.8987, 'grad_norm': 0.01223374960875052, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
+  9%|▉ | 49/520 [03:16<28:57, 3.69s/it] {'loss': 1.8788, 'grad_norm': 0.009691287655269224, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+ 10%|▉ | 50/520 [03:20<28:52, 3.69s/it] {'loss': 1.8535, 'grad_norm': 0.0075094628111633255, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:24<28:40, 3.67s/it] {'loss': 1.7497, 'grad_norm': 0.008826355678986327, 'learning_rate': 0.19762960071199334, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:27<28:29, 3.65s/it] {'loss': 1.9453, 'grad_norm': 0.009624023013889007, 'learning_rate': 0.19749279121818236, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:31<28:24, 3.65s/it] {'loss': 1.9528, 'grad_norm': 0.008358522384482077, 'learning_rate': 0.19735219372611235, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:35<28:23, 3.65s/it] {'loss': 1.7567, 'grad_norm': 0.010020082652254292, 'learning_rate': 0.19720781369857746, 'epoch': 0.1}
+ 11%|█ | 55/520 [03:38<28:26, 3.67s/it] {'loss': 1.7654, 'grad_norm': 0.008750811916650631, 'learning_rate': 0.1970596567453391, 'epoch': 0.11}
+ 11%|█ | 56/520 [03:42<28:39, 3.71s/it] {'loss': 1.9497, 'grad_norm': 0.009661893362539602, 'learning_rate': 0.1969077286229078, 'epoch': 0.11}
+ 11%|█ | 57/520 [03:46<28:33, 3.70s/it] {'loss': 1.7617, 'grad_norm': 0.008674262352990882, 'learning_rate': 0.19675203523431964, 'epoch': 0.11}
+ 11%|█ | 58/520 [03:49<28:24, 3.69s/it] {'loss': 1.911, 'grad_norm': 0.006579862434537025, 'learning_rate': 0.19659258262890683, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [03:53<28:15, 3.68s/it] {'loss': 2.209, 'grad_norm': 0.015056995783484685, 'learning_rate': 0.19642937700206278, 'epoch': 0.11}
+ 12%|█▏ | 60/520 [03:57<28:08, 3.67s/it] {'loss': 1.8314, 'grad_norm': 0.00650656580535892, 'learning_rate': 0.19626242469500121, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [04:00<28:02, 3.66s/it] {'loss': 2.3202, 'grad_norm': 0.012078853402227412, 'learning_rate': 0.19609173219450998, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:04<27:56, 3.66s/it] {'loss': 1.7782, 'grad_norm': 0.006286939516349118, 'learning_rate': 0.19591730613269878, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:08<27:55, 3.67s/it] {'loss': 1.7967, 'grad_norm': 0.006468355173839567, 'learning_rate': 0.19573915328674182, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:11<27:55, 3.68s/it] {'loss': 1.8003, 'grad_norm': 0.006955302100402871, 'learning_rate': 0.1955572805786141, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:15<27:56, 3.68s/it] {'loss': 1.8189, 'grad_norm': 0.007793929303588544, 'learning_rate': 0.1953716950748227, 'epoch': 0.12}
+ 13%|█▎ | 66/520 [04:19<27:51, 3.68s/it] {'loss': 1.7899, 'grad_norm': 0.006338893198470705, 'learning_rate': 0.19518240398613226, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:22<27:48, 3.68s/it] {'loss': 1.621, 'grad_norm': 0.006782722315552139, 'learning_rate': 0.1949894146672846, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:26<27:45, 3.68s/it] {'loss': 1.6426, 'grad_norm': 0.006882303007402807, 'learning_rate': 0.1947927346167132, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:30<27:39, 3.68s/it] {'loss': 1.6257, 'grad_norm': 0.007844225556985267, 'learning_rate': 0.1945923714762516, 'epoch': 0.13}
+ 13%|█▎ | 70/520 [04:33<27:34, 3.68s/it] {'loss': 1.7287, 'grad_norm': 0.00751963402230228, 'learning_rate': 0.19438833303083677, 'epoch': 0.13}
+ 14%|█▎ | 71/520 [04:37<27:39, 3.70s/it] {'loss': 1.5943, 'grad_norm': 0.005637137403045846, 'learning_rate': 0.19418062720820636, 'epoch': 0.14}
+ 14%|█▍ | 72/520 [04:41<27:28, 3.68s/it] {'loss': 1.7756, 'grad_norm': 0.0067143980376289745, 'learning_rate': 0.19396926207859086, 'epoch': 0.14}
+ 14%|█▍ | 73/520 [04:45<27:24, 3.68s/it] {'loss': 1.5594, 'grad_norm': 0.00607003087704676, 'learning_rate': 0.19375424585439993, 'epoch': 0.14}
+ 14%|█▍ | 74/520 [04:48<27:13, 3.66s/it] {'loss': 1.7046, 'grad_norm': 0.006486784262054121, 'learning_rate': 0.1935355868899034, 'epoch': 0.14}
+ 14%|█▍ | 75/520 [04:52<27:06, 3.66s/it] {'loss': 1.5852, 'grad_norm': 0.006123371491915796, 'learning_rate': 0.19331329368090666, 'epoch': 0.14}
+ 15%|█▍ | 76/520 [04:55<27:04, 3.66s/it] {'loss': 2.2794, 'grad_norm': 0.009073629558481695, 'learning_rate': 0.19308737486442043, 'epoch': 0.15}
+ 15%|█▍ | 77/520 [04:59<27:01, 3.66s/it] {'loss': 1.5198, 'grad_norm': 0.006458820188865386, 'learning_rate': 0.19285783921832536, 'epoch': 0.15}
+ 15%|█▌ | 78/520 [05:03<26:56, 3.66s/it] {'loss': 1.6611, 'grad_norm': 0.006365543587089534, 'learning_rate': 0.19262469566103088, 'epoch': 0.15}
+ 15%|█▌ | 79/520 [05:06<27:00, 3.67s/it] {'loss': 1.6328, 'grad_norm': 0.005674123708843784, 'learning_rate': 0.19238795325112867, 'epoch': 0.15}
+ 15%|█▌ | 80/520 [05:10<26:57, 3.68s/it] {'loss': 2.2379, 'grad_norm': 0.007588816550951808, 'learning_rate': 0.19214762118704076, 'epoch': 0.15}
+ 16%|█▌ | 81/520 [05:14<26:51, 3.67s/it] {'loss': 1.8322, 'grad_norm': 0.00825805157488792, 'learning_rate': 0.19190370880666208, 'epoch': 0.16}
+ 16%|█▌ | 82/520 [05:17<26:44, 3.66s/it] {'loss': 1.7115, 'grad_norm': 0.005761414895718814, 'learning_rate': 0.19165622558699763, 'epoch': 0.16}
+ 16%|█▌ | 83/520 [05:21<26:43, 3.67s/it] {'loss': 1.7779, 'grad_norm': 0.006483127054090176, 'learning_rate': 0.19140518114379435, 'epoch': 0.16}
+ 16%|█▌ | 84/520 [05:25<26:36, 3.66s/it] {'loss': 1.7503, 'grad_norm': 0.006844979507285456, 'learning_rate': 0.19115058523116735, 'epoch': 0.16}
+ 16%|█▋ | 85/520 [05:28<26:33, 3.66s/it] {'loss': 1.7092, 'grad_norm': 0.005759869276253317, 'learning_rate': 0.1908924477412211, 'epoch': 0.16}
+ 17%|█▋ | 86/520 [05:32<26:31, 3.67s/it] {'loss': 1.7962, 'grad_norm': 0.005638289129340553, 'learning_rate': 0.19063077870366502, 'epoch': 0.17}
+ 17%|█▋ | 87/520 [05:36<26:31, 3.68s/it] {'loss': 2.1442, 'grad_norm': 0.009889888112047295, 'learning_rate': 0.1903655882854237, 'epoch': 0.17}
+ 17%|█▋ | 88/520 [05:40<26:28, 3.68s/it] {'loss': 2.3196, 'grad_norm': 0.008691596749244268, 'learning_rate': 0.19009688679024192, 'epoch': 0.17}
+ 17%|█▋ | 89/520 [05:43<26:20, 3.67s/it] {'loss': 1.6858, 'grad_norm': 0.006131890965951547, 'learning_rate': 0.18982468465828442, 'epoch': 0.17}
+ 17%|█▋ | 90/520 [05:47<26:25, 3.69s/it] {'loss': 1.5955, 'grad_norm': 0.006146273009756291, 'learning_rate': 0.1895489924657301, 'epoch': 0.17}
+ 18%|█▊ | 91/520 [05:51<26:17, 3.68s/it] {'loss': 1.7002, 'grad_norm': 0.0055669879470660565, 'learning_rate': 0.18926982092436118, 'epoch': 0.17}
+ 18%|█▊ | 92/520 [05:54<26:12, 3.67s/it] {'loss': 1.631, 'grad_norm': 0.006221023572049801, 'learning_rate': 0.18898718088114688, 'epoch': 0.18}
+ 18%|█▊ | 93/520 [05:58<26:08, 3.67s/it] {'loss': 1.6249, 'grad_norm': 0.006525143462949767, 'learning_rate': 0.18870108331782218, 'epoch': 0.18}
+ 18%|█▊ | 94/520 [06:02<25:58, 3.66s/it] {'loss': 1.7489, 'grad_norm': 0.007256429560900716, 'learning_rate': 0.18841153935046098, 'epoch': 0.18}
+ 18%|█▊ | 95/520 [06:05<25:58, 3.67s/it] {'loss': 1.5974, 'grad_norm': 0.005917549864625987, 'learning_rate': 0.18811856022904425, 'epoch': 0.18}
+ 18%|█▊ | 96/520 [06:09<25:50, 3.66s/it] {'loss': 1.5974, 'grad_norm': 0.006032494324228623, 'learning_rate': 0.18782215733702287, 'epoch': 0.18}
+ 19%|█▊ | 97/520 [06:12<25:43, 3.65s/it] {'loss': 1.5858, 'grad_norm': 0.007410904942790473, 'learning_rate': 0.18752234219087538, 'epoch': 0.19}
+ 19%|█▉ | 98/520 [06:16<25:47, 3.67s/it] {'loss': 1.5712, 'grad_norm': 0.004809316909963942, 'learning_rate': 0.18721912643966054, 'epoch': 0.19}
+ 19%|█▉ | 99/520 [06:20<25:47, 3.68s/it] {'loss': 1.6091, 'grad_norm': 0.006425310543532582, 'learning_rate': 0.18691252186456464, 'epoch': 0.19}
+ 19%|█▉ | 100/520 [06:24<25:45, 3.68s/it] {'loss': 1.9153, 'grad_norm': 0.00926873460418573, 'learning_rate': 0.1866025403784439, 'epoch': 0.19}
+ 19%|█▉ | 101/520 [06:27<25:48, 3.70s/it] {'loss': 1.5847, 'grad_norm': 0.005496340009401917, 'learning_rate': 0.18628919402536132, 'epoch': 0.19}
+ 20%|█▉ | 102/520 [06:31<25:46, 3.70s/it] {'loss': 1.5988, 'grad_norm': 0.006436753222343074, 'learning_rate': 0.18597249498011903, 'epoch': 0.2}
+ 20%|█▉ | 103/520 [06:35<25:40, 3.69s/it] {'loss': 1.5284, 'grad_norm': 0.004836614276993057, 'learning_rate': 0.18565245554778517, 'epoch': 0.2}
0.2} + 20%|█▉ | 103/520 [06:35<25:40, 3.69s/it] 20%|██ | 104/520 [06:38<25:33, 3.69s/it] {'loss': 1.6133, 'grad_norm': 0.006001725389554084, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:38<25:33, 3.69s/it] 20%|██ | 105/520 [06:42<25:26, 3.68s/it] {'loss': 1.5959, 'grad_norm': 0.005193530134522931, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:42<25:26, 3.68s/it] 20%|██ | 106/520 [06:46<25:22, 3.68s/it] {'loss': 1.875, 'grad_norm': 0.007685200282807844, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:46<25:22, 3.68s/it] 21%|██ | 107/520 [06:49<25:16, 3.67s/it] {'loss': 1.8843, 'grad_norm': 0.00781604789811618, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:49<25:16, 3.67s/it] 21%|██ | 108/520 [06:53<25:14, 3.68s/it] {'loss': 1.5415, 'grad_norm': 0.005155970259634548, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:53<25:14, 3.68s/it] 21%|██ | 109/520 [06:57<25:10, 3.68s/it] {'loss': 1.8583, 'grad_norm': 0.0063587766689720306, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:57<25:10, 3.68s/it] 21%|██ | 110/520 [07:00<25:06, 3.67s/it] {'loss': 1.7482, 'grad_norm': 0.005617402747663627, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:00<25:06, 3.67s/it] 21%|██▏ | 111/520 [07:04<25:00, 3.67s/it] {'loss': 1.7599, 'grad_norm': 0.005936849537337962, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:04<25:00, 3.67s/it] 22%|██▏ | 112/520 [07:08<25:10, 3.70s/it] {'loss': 1.6249, 'grad_norm': 0.005170789300795006, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:08<25:10, 3.70s/it] 22%|██▏ | 113/520 [07:12<25:28, 3.75s/it] {'loss': 1.4635, 'grad_norm': 0.0048492260603700986, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:12<25:28, 3.75s/it] 22%|██▏ | 114/520 [07:15<25:23, 3.75s/it] {'loss': 1.5888, 'grad_norm': 0.005140913509458575, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:15<25:23, 3.75s/it] 22%|██▏ | 115/520 [07:19<25:10, 3.73s/it] {'loss': 1.7425, 'grad_norm': 0.004906899608355765, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:19<25:10, 3.73s/it] 22%|██▏ | 116/520 [07:23<24:57, 3.71s/it] {'loss': 1.7097, 'grad_norm': 0.005250093492777132, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:23<24:57, 3.71s/it] 22%|██▎ | 117/520 [07:26<24:48, 3.69s/it] {'loss': 1.7046, 'grad_norm': 0.005902480294001739, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:26<24:48, 3.69s/it] 23%|██▎ | 118/520 [07:30<24:38, 3.68s/it] {'loss': 1.5495, 'grad_norm': 0.004793922126116443, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:30<24:38, 3.68s/it] 23%|██▎ | 119/520 [07:34<24:33, 3.67s/it] {'loss': 1.4893, 'grad_norm': 0.005197793762394487, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:34<24:33, 3.67s/it] 23%|██▎ | 120/520 [07:37<24:33, 3.68s/it] {'loss': 1.5262, 'grad_norm': 0.005565055253834352, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:37<24:33, 3.68s/it] 23%|██▎ | 121/520 [07:41<24:28, 3.68s/it] {'loss': 1.5893, 'grad_norm': 0.0050526165165090935, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:41<24:28, 3.68s/it] 23%|██▎ | 122/520 [07:45<24:29, 3.69s/it] {'loss': 1.4589, 'grad_norm': 
0.0045237155043925045, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:45<24:29, 3.69s/it] 24%|██▎ | 123/520 [07:48<24:24, 3.69s/it] {'loss': 1.9248, 'grad_norm': 0.008540487821708713, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:49<24:24, 3.69s/it] 24%|██▍ | 124/520 [07:52<24:18, 3.68s/it] {'loss': 1.577, 'grad_norm': 0.005550720875182351, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:52<24:18, 3.68s/it] 24%|██▍ | 125/520 [07:56<24:15, 3.68s/it] {'loss': 1.5472, 'grad_norm': 0.004951918647335387, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:56<24:15, 3.68s/it] 24%|██▍ | 126/520 [08:00<25:26, 3.87s/it] {'loss': 1.7663, 'grad_norm': 0.005019656076745518, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:00<25:26, 3.87s/it] 24%|██▍ | 127/520 [08:04<24:58, 3.81s/it] {'loss': 1.5226, 'grad_norm': 0.00575480919346591, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:04<24:58, 3.81s/it] 25%|██▍ | 128/520 [08:08<24:37, 3.77s/it] {'loss': 1.5902, 'grad_norm': 0.004917547973044605, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:08<24:37, 3.77s/it] 25%|██▍ | 129/520 [08:11<24:20, 3.74s/it] {'loss': 1.4809, 'grad_norm': 0.004043355979832347, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:11<24:20, 3.74s/it] 25%|██▌ | 130/520 [08:15<24:12, 3.73s/it] {'loss': 1.5783, 'grad_norm': 0.004399303010230909, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:15<24:12, 3.73s/it] 25%|██▌ | 131/520 [08:19<24:01, 3.71s/it] {'loss': 1.7353, 'grad_norm': 0.006399833581263826, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:19<24:01, 3.71s/it] 25%|██▌ | 132/520 [08:22<23:52, 3.69s/it] {'loss': 1.6175, 'grad_norm': 0.005713637939000021, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:22<23:52, 3.69s/it] 26%|██▌ | 133/520 [08:26<23:51, 3.70s/it] {'loss': 1.4962, 'grad_norm': 0.005044973235891451, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:26<23:51, 3.70s/it] 26%|██▌ | 134/520 [08:30<23:42, 3.68s/it] {'loss': 1.5925, 'grad_norm': 0.004801269562217831, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:30<23:42, 3.68s/it] 26%|██▌ | 135/520 [08:33<23:35, 3.68s/it] {'loss': 1.6737, 'grad_norm': 0.005387041993485148, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:33<23:35, 3.68s/it] 26%|██▌ | 136/520 [08:37<23:30, 3.67s/it] {'loss': 1.5784, 'grad_norm': 0.005024498947231373, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:37<23:30, 3.67s/it] 26%|██▋ | 137/520 [08:41<23:24, 3.67s/it] {'loss': 1.4967, 'grad_norm': 0.006220623668768303, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:41<23:24, 3.67s/it] 27%|██▋ | 138/520 [08:44<23:18, 3.66s/it] {'loss': 1.4929, 'grad_norm': 0.005658248075136454, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:44<23:18, 3.66s/it] 27%|██▋ | 139/520 [08:48<23:19, 3.67s/it] {'loss': 1.6312, 'grad_norm': 0.005956281865656259, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:48<23:19, 3.67s/it] 27%|██▋ | 140/520 [08:52<23:20, 3.69s/it] {'loss': 1.772, 'grad_norm': 0.006469647788458027, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:52<23:20, 3.69s/it] 27%|██▋ 
| 141/520 [08:55<23:15, 3.68s/it] {'loss': 1.6431, 'grad_norm': 0.0060530454373146, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:55<23:15, 3.68s/it] 27%|██▋ | 142/520 [08:59<23:13, 3.69s/it] {'loss': 1.8152, 'grad_norm': 0.00509331527167709, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:59<23:13, 3.69s/it] 28%|██▊ | 143/520 [09:03<23:09, 3.69s/it] {'loss': 1.5602, 'grad_norm': 0.007447344211346964, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:03<23:09, 3.69s/it] 28%|██▊ | 144/520 [09:06<23:07, 3.69s/it] {'loss': 1.4759, 'grad_norm': 0.005559416343768544, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:06<23:07, 3.69s/it] 28%|██▊ | 145/520 [09:10<23:03, 3.69s/it] {'loss': 1.4161, 'grad_norm': 0.004606230508209358, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:10<23:03, 3.69s/it] 28%|██▊ | 146/520 [09:14<23:03, 3.70s/it] {'loss': 1.8688, 'grad_norm': 0.007502695976241852, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:14<23:03, 3.70s/it] 28%|██▊ | 147/520 [09:17<23:02, 3.71s/it] {'loss': 1.4582, 'grad_norm': 0.0055954202599542, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:17<23:02, 3.71s/it] 28%|██▊ | 148/520 [09:21<23:00, 3.71s/it] {'loss': 1.5089, 'grad_norm': 0.004892260283663369, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:21<23:00, 3.71s/it] 29%|██▊ | 149/520 [09:25<22:54, 3.70s/it] {'loss': 1.4744, 'grad_norm': 0.005952747449044273, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:25<22:54, 3.70s/it] 29%|██▉ | 150/520 [09:29<22:47, 3.70s/it] {'loss': 1.6992, 'grad_norm': 0.007129414842670221, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:29<22:47, 3.70s/it] 29%|██▉ | 151/520 [09:32<22:40, 3.69s/it] {'loss': 1.4788, 'grad_norm': 0.005405919782733328, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:32<22:40, 3.69s/it] 29%|██▉ | 152/520 [09:36<22:40, 3.70s/it] {'loss': 1.4468, 'grad_norm': 0.007402115742714501, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:36<22:40, 3.70s/it] 29%|██▉ | 153/520 [09:40<22:44, 3.72s/it] {'loss': 1.4864, 'grad_norm': 0.006645668179349846, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:40<22:44, 3.72s/it] 30%|██▉ | 154/520 [09:44<22:51, 3.75s/it] {'loss': 1.5835, 'grad_norm': 0.005008731557299328, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:44<22:51, 3.75s/it] 30%|██▉ | 155/520 [09:47<22:45, 3.74s/it] {'loss': 1.47, 'grad_norm': 0.006365681220853894, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:47<22:45, 3.74s/it] 30%|███ | 156/520 [09:51<22:37, 3.73s/it] {'loss': 1.5333, 'grad_norm': 0.005960576634600708, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:51<22:37, 3.73s/it] 30%|███ | 157/520 [09:55<22:27, 3.71s/it] {'loss': 1.8901, 'grad_norm': 0.008252756319555514, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:55<22:27, 3.71s/it] 30%|███ | 158/520 [09:58<22:23, 3.71s/it] {'loss': 1.4868, 'grad_norm': 0.007884851031080673, 'learning_rate': 0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [09:58<22:23, 3.71s/it] 31%|███ | 159/520 [10:02<22:17, 3.70s/it] {'loss': 1.5034, 'grad_norm': 0.005336508973547418, 'learning_rate': 0.16283510625655473, 'epoch': 
0.31} + 31%|███ | 159/520 [10:02<22:17, 3.70s/it] 31%|███ | 160/520 [10:06<22:12, 3.70s/it] {'loss': 1.5482, 'grad_norm': 0.005015756479456027, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:06<22:12, 3.70s/it] 31%|███ | 161/520 [10:09<22:03, 3.69s/it] {'loss': 1.545, 'grad_norm': 0.006109665822620169, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:09<22:03, 3.69s/it] 31%|███ | 162/520 [10:13<21:58, 3.68s/it] {'loss': 1.7687, 'grad_norm': 0.005769566779960477, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:13<21:58, 3.68s/it] 31%|███▏ | 163/520 [10:17<22:07, 3.72s/it] {'loss': 1.3814, 'grad_norm': 0.005595274723238912, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:17<22:07, 3.72s/it] 32%|███▏ | 164/520 [10:21<22:15, 3.75s/it] {'loss': 1.3589, 'grad_norm': 0.005343601635334363, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:21<22:15, 3.75s/it] 32%|███▏ | 165/520 [10:25<22:19, 3.77s/it] {'loss': 1.493, 'grad_norm': 0.0052157532512231005, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:25<22:19, 3.77s/it] 32%|███▏ | 166/520 [10:28<22:19, 3.79s/it] {'loss': 1.5118, 'grad_norm': 0.00519979051369653, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:28<22:19, 3.79s/it] 32%|███▏ | 167/520 [10:32<22:25, 3.81s/it] {'loss': 1.4985, 'grad_norm': 0.0060827983927682775, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:32<22:25, 3.81s/it] 32%|███▏ | 168/520 [10:36<22:24, 3.82s/it] {'loss': 1.4334, 'grad_norm': 0.004868833285416989, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:36<22:24, 3.82s/it] 32%|███▎ | 169/520 [10:40<22:21, 3.82s/it] {'loss': 1.5123, 'grad_norm': 0.004173528216600842, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:40<22:21, 3.82s/it] 33%|███▎ | 170/520 [10:44<22:20, 3.83s/it] {'loss': 1.6689, 'grad_norm': 0.005348027152174098, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:44<22:20, 3.83s/it] 33%|███▎ | 171/520 [10:48<22:17, 3.83s/it] {'loss': 1.4489, 'grad_norm': 0.005563877209581415, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:48<22:17, 3.83s/it] 33%|███▎ | 172/520 [10:51<22:12, 3.83s/it] {'loss': 1.5146, 'grad_norm': 0.0051611950033772815, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:51<22:12, 3.83s/it] 33%|███▎ | 173/520 [10:55<22:11, 3.84s/it] {'loss': 1.4339, 'grad_norm': 0.005001162247443302, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:55<22:11, 3.84s/it] 33%|███▎ | 174/520 [10:59<22:09, 3.84s/it] {'loss': 1.5346, 'grad_norm': 0.005700041922133414, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [10:59<22:09, 3.84s/it] 34%|███▎ | 175/520 [11:03<22:08, 3.85s/it] {'loss': 1.427, 'grad_norm': 0.004494125557405401, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:03<22:08, 3.85s/it] 34%|███▍ | 176/520 [11:07<22:06, 3.85s/it] {'loss': 1.7576, 'grad_norm': 0.005262736371945192, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:07<22:06, 3.85s/it] 34%|███▍ | 177/520 [11:11<21:59, 3.85s/it] {'loss': 1.602, 'grad_norm': 0.006214516207903296, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:11<21:59, 3.85s/it] 34%|███▍ | 178/520 [11:14<21:53, 3.84s/it] 
{'loss': 1.4973, 'grad_norm': 0.004875558087330114, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:14<21:53, 3.84s/it] 34%|███▍ | 179/520 [11:18<21:49, 3.84s/it] {'loss': 1.5873, 'grad_norm': 0.004481984890193558, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:18<21:49, 3.84s/it] 35%|███▍ | 180/520 [11:22<21:46, 3.84s/it] {'loss': 1.4766, 'grad_norm': 0.005171604139646449, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:22<21:46, 3.84s/it] 35%|███▍ | 181/520 [11:26<21:42, 3.84s/it] {'loss': 1.4571, 'grad_norm': 0.0050024052035730114, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:26<21:42, 3.84s/it] 35%|███▌ | 182/520 [11:30<21:39, 3.84s/it] {'loss': 1.4694, 'grad_norm': 0.004706124189474129, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:30<21:39, 3.84s/it] 35%|███▌ | 183/520 [11:34<21:34, 3.84s/it] {'loss': 1.5111, 'grad_norm': 0.005555692969686081, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:34<21:34, 3.84s/it] 35%|███▌ | 184/520 [11:38<21:32, 3.85s/it] {'loss': 1.3918, 'grad_norm': 0.004412444474029941, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:38<21:32, 3.85s/it] 36%|███▌ | 185/520 [11:41<21:29, 3.85s/it] {'loss': 1.6051, 'grad_norm': 0.004247647634871608, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:41<21:29, 3.85s/it] 36%|███▌ | 186/520 [11:45<21:24, 3.84s/it] {'loss': 1.4044, 'grad_norm': 0.004232369801202225, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:45<21:24, 3.84s/it] 36%|███▌ | 187/520 [11:49<21:30, 3.87s/it] {'loss': 1.4403, 'grad_norm': 0.00511603112514592, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:49<21:30, 3.87s/it] 36%|███▌ | 188/520 [11:53<21:31, 3.89s/it] {'loss': 1.5008, 'grad_norm': 0.004892682343036956, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:53<21:31, 3.89s/it] 36%|███▋ | 189/520 [11:57<21:32, 3.91s/it] {'loss': 1.5328, 'grad_norm': 0.004212785353003493, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:57<21:32, 3.91s/it] 37%|███▋ | 190/520 [12:01<21:25, 3.90s/it] {'loss': 1.4234, 'grad_norm': 0.004723959031828125, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:01<21:25, 3.90s/it] 37%|███▋ | 191/520 [12:05<21:23, 3.90s/it] {'loss': 1.4002, 'grad_norm': 0.004646956839225154, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:05<21:23, 3.90s/it] 37%|███▋ | 192/520 [12:09<21:22, 3.91s/it] {'loss': 1.4915, 'grad_norm': 0.004253224810817914, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:09<21:22, 3.91s/it] 37%|███▋ | 193/520 [12:13<21:19, 3.91s/it] {'loss': 1.6992, 'grad_norm': 0.0058423521012853806, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:13<21:19, 3.91s/it] 37%|███▋ | 194/520 [12:17<21:19, 3.92s/it] {'loss': 1.5461, 'grad_norm': 0.004619835494023669, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:17<21:19, 3.92s/it] 38%|███▊ | 195/520 [12:21<21:14, 3.92s/it] {'loss': 1.4998, 'grad_norm': 0.004299464736315168, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:21<21:14, 3.92s/it] 38%|███▊ | 196/520 [12:24<21:11, 3.92s/it] {'loss': 1.4619, 'grad_norm': 0.004827916743519038, 'learning_rate': 
0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:24<21:11, 3.92s/it] 38%|███▊ | 197/520 [12:28<21:07, 3.92s/it] {'loss': 1.4213, 'grad_norm': 0.0048294083131712705, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:28<21:07, 3.92s/it] 38%|███▊ | 198/520 [12:32<21:00, 3.92s/it] {'loss': 1.5139, 'grad_norm': 0.004788505321510833, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:32<21:00, 3.92s/it] 38%|███▊ | 199/520 [12:36<20:59, 3.92s/it] {'loss': 1.4064, 'grad_norm': 0.004612245142071337, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:36<20:59, 3.92s/it] 38%|███▊ | 200/520 [12:40<20:55, 3.92s/it] {'loss': 1.6091, 'grad_norm': 0.0055105600141440786, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:40<20:55, 3.92s/it] 39%|███▊ | 201/520 [12:44<20:50, 3.92s/it] {'loss': 1.5996, 'grad_norm': 0.004628197629357718, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:44<20:50, 3.92s/it] 39%|███▉ | 202/520 [12:48<21:07, 3.99s/it] {'loss': 1.4047, 'grad_norm': 0.004376247655143083, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:48<21:07, 3.99s/it] 39%|███▉ | 203/520 [12:52<21:16, 4.03s/it] {'loss': 1.4559, 'grad_norm': 0.004421826281741737, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:52<21:16, 4.03s/it] 39%|███▉ | 204/520 [12:56<21:24, 4.07s/it] {'loss': 1.5108, 'grad_norm': 0.00478837315407331, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:57<21:24, 4.07s/it] 39%|███▉ | 205/520 [13:01<21:29, 4.09s/it] {'loss': 1.6118, 'grad_norm': 0.005288381509011333, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:01<21:29, 4.09s/it] 40%|███▉ | 206/520 [13:05<21:12, 4.05s/it] {'loss': 1.5565, 'grad_norm': 0.004715004887164223, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:05<21:12, 4.05s/it] 40%|███▉ | 207/520 [13:09<20:53, 4.01s/it] {'loss': 1.5948, 'grad_norm': 0.0043119130715796586, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:09<20:53, 4.01s/it] 40%|████ | 208/520 [13:12<20:33, 3.95s/it] {'loss': 1.5002, 'grad_norm': 0.004853549002338477, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:12<20:33, 3.95s/it] 40%|████ | 209/520 [13:16<20:06, 3.88s/it] {'loss': 1.4331, 'grad_norm': 0.004443950557092876, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:16<20:06, 3.88s/it] 40%|████ | 210/520 [13:20<19:54, 3.85s/it] {'loss': 1.5047, 'grad_norm': 0.004894877246917889, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:20<19:54, 3.85s/it] 41%|████ | 211/520 [13:24<19:37, 3.81s/it] {'loss': 1.5253, 'grad_norm': 0.004086219047356642, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:24<19:37, 3.81s/it] 41%|████ | 212/520 [13:27<19:34, 3.81s/it] {'loss': 1.4784, 'grad_norm': 0.004818348794612124, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:27<19:34, 3.81s/it] 41%|████ | 213/520 [13:31<19:19, 3.78s/it] {'loss': 1.4555, 'grad_norm': 0.004874086302311682, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:31<19:19, 3.78s/it] 41%|████ | 214/520 [13:35<19:07, 3.75s/it] {'loss': 1.4503, 'grad_norm': 0.004795072596231419, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:35<19:07, 3.75s/it] 
41%|████▏ | 215/520 [13:38<18:55, 3.72s/it] {'loss': 1.5028, 'grad_norm': 0.004527090394804772, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:38<18:55, 3.72s/it] 42%|████▏ | 216/520 [13:42<18:50, 3.72s/it] {'loss': 1.3315, 'grad_norm': 0.004195137278325452, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:42<18:50, 3.72s/it] 42%|████▏ | 217/520 [13:46<18:46, 3.72s/it] {'loss': 1.4818, 'grad_norm': 0.004453514214445455, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:46<18:46, 3.72s/it] 42%|████▏ | 218/520 [13:50<18:50, 3.75s/it] {'loss': 1.4734, 'grad_norm': 0.004719449613140748, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:50<18:50, 3.75s/it] 42%|████▏ | 219/520 [13:53<18:42, 3.73s/it] {'loss': 1.4225, 'grad_norm': 0.003954629944834352, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:53<18:42, 3.73s/it] 42%|████▏ | 220/520 [13:57<18:44, 3.75s/it] {'loss': 1.5653, 'grad_norm': 0.005763533840976126, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:57<18:44, 3.75s/it] 42%|████▎ | 221/520 [14:01<18:39, 3.74s/it] {'loss': 1.4714, 'grad_norm': 0.004622457465804002, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:01<18:39, 3.74s/it] 43%|████▎ | 222/520 [14:05<18:34, 3.74s/it] {'loss': 1.366, 'grad_norm': 0.005078649874542821, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:05<18:34, 3.74s/it] 43%|████▎ | 223/520 [14:08<18:23, 3.72s/it] {'loss': 1.3539, 'grad_norm': 0.003922916737751281, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:08<18:23, 3.72s/it] 43%|████▎ | 224/520 [14:12<18:19, 3.72s/it] {'loss': 1.9061, 'grad_norm': 0.00674554926206332, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:12<18:19, 3.72s/it] 43%|████▎ | 225/520 [14:16<18:15, 3.71s/it] {'loss': 1.375, 'grad_norm': 0.005354448062200771, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:16<18:15, 3.71s/it] 43%|████▎ | 226/520 [14:19<18:07, 3.70s/it] {'loss': 1.4871, 'grad_norm': 0.00470037396251517, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:19<18:07, 3.70s/it] 44%|████▎ | 227/520 [14:23<18:03, 3.70s/it] {'loss': 1.4687, 'grad_norm': 0.004192875505362489, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:23<18:03, 3.70s/it] 44%|████▍ | 228/520 [14:27<17:55, 3.68s/it] {'loss': 1.7619, 'grad_norm': 0.0062353566401030895, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:27<17:55, 3.68s/it] 44%|████▍ | 229/520 [14:30<17:50, 3.68s/it] {'loss': 1.4547, 'grad_norm': 0.003863935947016052, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:30<17:50, 3.68s/it] 44%|████▍ | 230/520 [14:34<17:52, 3.70s/it] {'loss': 1.3139, 'grad_norm': 0.004593264904060576, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:34<17:52, 3.70s/it] 44%|████▍ | 231/520 [14:38<17:51, 3.71s/it] {'loss': 1.3822, 'grad_norm': 0.0038674810842487376, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:38<17:51, 3.71s/it] 45%|████▍ | 232/520 [14:41<17:43, 3.69s/it] {'loss': 1.7692, 'grad_norm': 0.00540772718771853, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:41<17:43, 3.69s/it] 45%|████▍ | 233/520 [14:45<17:42, 3.70s/it] {'loss': 
1.6036, 'grad_norm': 0.004832954430691564, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:45<17:42, 3.70s/it] 45%|████▌ | 234/520 [14:49<17:36, 3.70s/it] {'loss': 1.324, 'grad_norm': 0.004371171053120246, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:49<17:36, 3.70s/it] 45%|████▌ | 235/520 [14:53<17:31, 3.69s/it] {'loss': 1.3729, 'grad_norm': 0.004937099185593487, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:53<17:31, 3.69s/it] 45%|████▌ | 236/520 [14:56<17:41, 3.74s/it] {'loss': 1.528, 'grad_norm': 0.00402281391302097, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:56<17:41, 3.74s/it] 46%|████▌ | 237/520 [15:00<17:37, 3.74s/it] {'loss': 1.4629, 'grad_norm': 0.0040760944224611274, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:00<17:37, 3.74s/it] 46%|████▌ | 238/520 [15:04<17:42, 3.77s/it] {'loss': 1.4064, 'grad_norm': 0.004198827499876028, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:04<17:42, 3.77s/it] 46%|████▌ | 239/520 [15:08<17:37, 3.76s/it] {'loss': 1.5349, 'grad_norm': 0.004932991853780982, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:08<17:37, 3.76s/it] 46%|████▌ | 240/520 [15:11<17:26, 3.74s/it] {'loss': 1.2561, 'grad_norm': 0.0039406453420787295, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:11<17:26, 3.74s/it] 46%|████▋ | 241/520 [15:15<17:19, 3.73s/it] {'loss': 1.347, 'grad_norm': 0.003953870012094818, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:15<17:19, 3.73s/it] 47%|████▋ | 242/520 [15:19<17:13, 3.72s/it] {'loss': 1.3793, 'grad_norm': 0.003988784850068414, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:19<17:13, 3.72s/it] 47%|████▋ | 243/520 [15:22<17:06, 3.71s/it] {'loss': 1.3651, 'grad_norm': 0.004214688715615006, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:22<17:06, 3.71s/it] 47%|████▋ | 244/520 [15:26<17:10, 3.73s/it] {'loss': 1.5325, 'grad_norm': 0.004754863254807425, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:26<17:10, 3.73s/it] 47%|████▋ | 245/520 [15:30<17:10, 3.75s/it] {'loss': 1.3621, 'grad_norm': 0.00465869434659903, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:30<17:10, 3.75s/it] 47%|████▋ | 246/520 [15:34<17:15, 3.78s/it] {'loss': 1.7216, 'grad_norm': 0.004686204602962905, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:34<17:15, 3.78s/it] 48%|████▊ | 247/520 [15:38<17:20, 3.81s/it] {'loss': 1.5395, 'grad_norm': 0.004260060102308016, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:38<17:20, 3.81s/it] 48%|████▊ | 248/520 [15:42<17:17, 3.82s/it] {'loss': 1.3472, 'grad_norm': 0.00448800200936247, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:42<17:17, 3.82s/it] 48%|████▊ | 249/520 [15:46<17:17, 3.83s/it] {'loss': 1.4815, 'grad_norm': 0.00521620685640256, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:46<17:17, 3.83s/it] 48%|████▊ | 250/520 [15:49<17:16, 3.84s/it] {'loss': 1.4188, 'grad_norm': 0.004949227935753555, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:49<17:16, 3.84s/it] 48%|████▊ | 251/520 [15:53<17:13, 3.84s/it] {'loss': 1.4743, 'grad_norm': 0.003821066388589883, 
'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:53<17:13, 3.84s/it] 48%|████▊ | 252/520 [15:57<17:08, 3.84s/it] {'loss': 1.5808, 'grad_norm': 0.0049042806050329305, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:57<17:08, 3.84s/it] 49%|████▊ | 253/520 [16:01<17:05, 3.84s/it] {'loss': 1.477, 'grad_norm': 0.005347207439733712, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:01<17:05, 3.84s/it] 49%|████▉ | 254/520 [16:05<17:01, 3.84s/it] {'loss': 1.3664, 'grad_norm': 0.0038666762425494487, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:05<17:01, 3.84s/it] 49%|████▉ | 255/520 [16:09<16:58, 3.84s/it] {'loss': 1.3992, 'grad_norm': 0.00450641818879902, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:09<16:58, 3.84s/it] 49%|████▉ | 256/520 [16:12<16:54, 3.84s/it] {'loss': 1.4297, 'grad_norm': 0.004320581904371756, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:12<16:54, 3.84s/it] 49%|████▉ | 257/520 [16:16<16:54, 3.86s/it] {'loss': 1.4458, 'grad_norm': 0.004174639195086949, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:16<16:54, 3.86s/it] 50%|████▉ | 258/520 [16:20<16:48, 3.85s/it] {'loss': 1.4591, 'grad_norm': 0.003760343184477982, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:20<16:48, 3.85s/it] 50%|████▉ | 259/520 [16:24<16:42, 3.84s/it] {'loss': 1.5214, 'grad_norm': 0.0052313806987411525, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:24<16:42, 3.84s/it] 50%|█████ | 260/520 [16:28<16:40, 3.85s/it] {'loss': 1.6858, 'grad_norm': 0.00473609232971017, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:28<16:40, 3.85s/it] 50%|█████ | 261/520 [16:32<16:37, 3.85s/it] {'loss': 1.5924, 'grad_norm': 0.005113685911821807, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:32<16:37, 3.85s/it] 50%|█████ | 262/520 [16:36<16:33, 3.85s/it] {'loss': 1.3476, 'grad_norm': 0.00467103422239099, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:36<16:33, 3.85s/it] 51%|█████ | 263/520 [16:39<16:26, 3.84s/it] {'loss': 1.6019, 'grad_norm': 0.004746034574870192, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:39<16:26, 3.84s/it] 51%|█████ | 264/520 [16:43<16:21, 3.83s/it] {'loss': 1.4789, 'grad_norm': 0.004298536660590562, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:43<16:21, 3.83s/it] 51%|█████ | 265/520 [16:47<16:17, 3.84s/it] {'loss': 1.3525, 'grad_norm': 0.005259068818157866, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:47<16:17, 3.84s/it] 51%|█████ | 266/520 [16:51<16:13, 3.83s/it] {'loss': 1.1961, 'grad_norm': 0.00356680261759223, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:51<16:13, 3.83s/it] 51%|█████▏ | 267/520 [16:55<16:10, 3.83s/it] {'loss': 1.3669, 'grad_norm': 0.00455839331738255, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:55<16:10, 3.83s/it] 52%|█████▏ | 268/520 [16:59<16:06, 3.84s/it] {'loss': 1.7302, 'grad_norm': 0.0056293954174197365, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:59<16:06, 3.84s/it] 52%|█████▏ | 269/520 [17:02<16:02, 3.83s/it] {'loss': 1.4726, 'grad_norm': 0.00416862565261479, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ 
| 269/520 [17:02<16:02, 3.83s/it] 52%|█████▏ | 270/520 [17:06<16:02, 3.85s/it] {'loss': 1.5082, 'grad_norm': 0.004002658712099744, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:06<16:02, 3.85s/it] 52%|█████▏ | 271/520 [17:10<15:57, 3.84s/it] {'loss': 1.4815, 'grad_norm': 0.0046662469046045525, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:10<15:57, 3.84s/it] 52%|█████▏ | 272/520 [17:14<15:55, 3.85s/it] {'loss': 1.5159, 'grad_norm': 0.004680298198914029, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:14<15:55, 3.85s/it] 52%|█████▎ | 273/520 [17:18<15:46, 3.83s/it] {'loss': 1.6438, 'grad_norm': 0.005180065461398017, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:18<15:46, 3.83s/it] 53%|█████▎ | 274/520 [17:21<15:27, 3.77s/it] {'loss': 1.4175, 'grad_norm': 0.004936669579640293, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:21<15:27, 3.77s/it] 53%|█████▎ | 275/520 [17:25<15:17, 3.74s/it] {'loss': 1.3567, 'grad_norm': 0.004699691563291652, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:25<15:17, 3.74s/it] 53%|█████▎ | 276/520 [17:29<15:07, 3.72s/it] {'loss': 1.4544, 'grad_norm': 0.0044712444260648024, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:29<15:07, 3.72s/it] 53%|█████▎ | 277/520 [17:32<15:03, 3.72s/it] {'loss': 1.6288, 'grad_norm': 0.004482054436001545, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:32<15:03, 3.72s/it] 53%|█████▎ | 278/520 [17:36<14:55, 3.70s/it] {'loss': 1.2974, 'grad_norm': 0.00394857091967785, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:36<14:55, 3.70s/it] 54%|█████▎ | 279/520 [17:40<14:48, 3.69s/it] {'loss': 1.5388, 'grad_norm': 0.0052255718056490475, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:40<14:48, 3.69s/it] 54%|█████▍ | 280/520 [17:43<14:43, 3.68s/it] {'loss': 1.3618, 'grad_norm': 0.004605255141863188, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:43<14:43, 3.68s/it] 54%|█████▍ | 281/520 [17:47<14:39, 3.68s/it] {'loss': 1.4774, 'grad_norm': 0.0046082184598875775, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:47<14:39, 3.68s/it] 54%|█████▍ | 282/520 [17:51<14:34, 3.68s/it] {'loss': 1.3177, 'grad_norm': 0.004193308393868001, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:51<14:34, 3.68s/it] 54%|█████▍ | 283/520 [17:54<14:29, 3.67s/it] {'loss': 1.5177, 'grad_norm': 0.0045030985009881916, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:54<14:29, 3.67s/it] 55%|█████▍ | 284/520 [17:58<14:24, 3.66s/it] {'loss': 1.5002, 'grad_norm': 0.004641859334948115, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:58<14:24, 3.66s/it] 55%|█████▍ | 285/520 [18:02<14:20, 3.66s/it] {'loss': 1.3541, 'grad_norm': 0.004308841892279727, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:02<14:20, 3.66s/it] 55%|█████▌ | 286/520 [18:05<14:17, 3.66s/it] {'loss': 1.2017, 'grad_norm': 0.004439966228650147, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:05<14:17, 3.66s/it] 55%|█████▌ | 287/520 [18:09<14:14, 3.67s/it] {'loss': 1.4759, 'grad_norm': 0.004927936855868018, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 
287/520 [18:09<14:14, 3.67s/it] 55%|█████▌ | 288/520 [18:13<14:09, 3.66s/it] {'loss': 1.5261, 'grad_norm': 0.004206387476590774, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:13<14:09, 3.66s/it] 56%|█████▌ | 289/520 [18:16<14:07, 3.67s/it] {'loss': 1.351, 'grad_norm': 0.004045806312215578, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:16<14:07, 3.67s/it] 56%|█████▌ | 290/520 [18:20<14:03, 3.67s/it] {'loss': 1.2698, 'grad_norm': 0.004003169058424341, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:20<14:03, 3.67s/it] 56%|█████▌ | 291/520 [18:24<13:59, 3.66s/it] {'loss': 1.3316, 'grad_norm': 0.004546912916870911, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:24<13:59, 3.66s/it] 56%|█████▌ | 292/520 [18:27<13:54, 3.66s/it] {'loss': 1.3897, 'grad_norm': 0.003899415745392585, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:27<13:54, 3.66s/it] 56%|█████▋ | 293/520 [18:31<13:52, 3.67s/it] {'loss': 1.3241, 'grad_norm': 0.004535499854080768, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:31<13:52, 3.67s/it] 57%|█████▋ | 294/520 [18:35<13:49, 3.67s/it] {'loss': 1.357, 'grad_norm': 0.0042830499969061945, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:35<13:49, 3.67s/it] 57%|█████▋ | 295/520 [18:38<13:46, 3.67s/it] {'loss': 1.6032, 'grad_norm': 0.004893581567795843, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:38<13:46, 3.67s/it] 57%|█████▋ | 296/520 [18:42<13:40, 3.66s/it] {'loss': 1.2874, 'grad_norm': 0.003994433520496125, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:42<13:40, 3.66s/it] 57%|█████▋ | 297/520 [18:46<13:35, 3.66s/it] {'loss': 1.4398, 'grad_norm': 0.004561090334984471, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:46<13:35, 3.66s/it] 57%|█████▋ | 298/520 [18:49<13:32, 3.66s/it] {'loss': 1.4074, 'grad_norm': 0.0038540849908332677, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:49<13:32, 3.66s/it] 57%|█████▊ | 299/520 [18:53<13:27, 3.66s/it] {'loss': 1.5693, 'grad_norm': 0.004155442781237494, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:53<13:27, 3.66s/it] 58%|█████▊ | 300/520 [18:57<13:27, 3.67s/it] {'loss': 1.4759, 'grad_norm': 0.004476612929121112, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:57<13:27, 3.67s/it] 58%|█████▊ | 301/520 [19:01<13:36, 3.73s/it] {'loss': 1.427, 'grad_norm': 0.004194519527393456, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:01<13:36, 3.73s/it] 58%|█████▊ | 302/520 [19:04<13:45, 3.78s/it] {'loss': 1.5907, 'grad_norm': 0.004410290851234228, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:04<13:45, 3.78s/it] 58%|█████▊ | 303/520 [19:08<13:47, 3.81s/it] {'loss': 1.3521, 'grad_norm': 0.004520586557843884, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:08<13:47, 3.81s/it] 58%|█████▊ | 304/520 [19:12<13:49, 3.84s/it] {'loss': 1.4793, 'grad_norm': 0.004858707625651718, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:12<13:49, 3.84s/it] 59%|█████▊ | 305/520 [19:16<13:48, 3.86s/it] {'loss': 1.4974, 'grad_norm': 0.005028560516875041, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 
[19:16<13:48, 3.86s/it] 59%|█████▉ | 306/520 [19:20<13:49, 3.87s/it] {'loss': 1.4041, 'grad_norm': 0.004459040678025371, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:20<13:49, 3.87s/it] 59%|█████▉ | 307/520 [19:24<14:15, 4.02s/it] {'loss': 1.3442, 'grad_norm': 0.00406390449495819, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:24<14:15, 4.02s/it] 59%|█████▉ | 308/520 [19:28<14:02, 3.97s/it] {'loss': 1.4682, 'grad_norm': 0.004251703626913705, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:28<14:02, 3.97s/it] 59%|█████▉ | 309/520 [19:32<13:51, 3.94s/it] {'loss': 1.3356, 'grad_norm': 0.003937610951687412, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:32<13:51, 3.94s/it] 60%|█████▉ | 310/520 [19:36<13:42, 3.92s/it] {'loss': 1.3182, 'grad_norm': 0.004468874848775937, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:36<13:42, 3.92s/it] 60%|█████▉ | 311/520 [19:40<13:35, 3.90s/it] {'loss': 1.281, 'grad_norm': 0.004001330739809223, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:40<13:35, 3.90s/it] 60%|██████ | 312/520 [19:44<13:28, 3.89s/it] {'loss': 1.2723, 'grad_norm': 0.00480664359681245, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:44<13:28, 3.89s/it] 60%|██████ | 313/520 [19:48<13:25, 3.89s/it] {'loss': 1.2589, 'grad_norm': 0.0036959681823600576, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:48<13:25, 3.89s/it] 60%|██████ | 314/520 [19:52<13:42, 3.99s/it] {'loss': 1.2962, 'grad_norm': 0.003851058878620666, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:52<13:42, 3.99s/it] 61%|██████ | 315/520 [19:56<13:29, 3.95s/it] {'loss': 1.5866, 'grad_norm': 0.005090854806856739, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:56<13:29, 3.95s/it] 61%|██████ | 316/520 [20:00<13:51, 4.08s/it] {'loss': 1.2663, 'grad_norm': 0.005112508346734006, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [20:00<13:51, 4.08s/it] 61%|██████ | 317/520 [20:04<13:35, 4.02s/it] {'loss': 1.292, 'grad_norm': 0.003815912016221358, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [20:04<13:35, 4.02s/it] 61%|██████ | 318/520 [20:08<13:24, 3.98s/it] {'loss': 1.4351, 'grad_norm': 0.004841070545902464, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:08<13:24, 3.98s/it] 61%|██████▏ | 319/520 [20:12<13:40, 4.08s/it] {'loss': 1.2809, 'grad_norm': 0.004484312486230725, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:12<13:40, 4.08s/it] 62%|██████▏ | 320/520 [20:16<13:23, 4.02s/it] {'loss': 1.2267, 'grad_norm': 0.004379592262420465, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:16<13:23, 4.02s/it] 62%|██████▏ | 321/520 [20:20<13:13, 3.99s/it] {'loss': 1.4396, 'grad_norm': 0.004413616155783197, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:20<13:13, 3.99s/it] 62%|██████▏ | 322/520 [20:24<13:02, 3.95s/it] {'loss': 1.4, 'grad_norm': 0.004799697022377068, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:24<13:02, 3.95s/it] 62%|██████▏ | 323/520 [20:28<12:52, 3.92s/it] {'loss': 1.4963, 'grad_norm': 0.005014062011007357, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 
[20:28<12:52, 3.92s/it] 62%|██████▏ | 324/520 [20:32<12:43, 3.90s/it] {'loss': 1.3583, 'grad_norm': 0.005162178162164249, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:32<12:43, 3.90s/it] 62%|██████▎ | 325/520 [20:35<12:38, 3.89s/it] {'loss': 1.3778, 'grad_norm': 0.004950723654285087, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:35<12:38, 3.89s/it] 63%|██████▎ | 326/520 [20:39<12:35, 3.89s/it] {'loss': 1.3373, 'grad_norm': 0.004104261495175718, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:39<12:35, 3.89s/it] 63%|██████▎ | 327/520 [20:43<12:29, 3.88s/it] {'loss': 1.5726, 'grad_norm': 0.005374598823889922, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:43<12:29, 3.88s/it] 63%|██████▎ | 328/520 [20:47<12:21, 3.86s/it] {'loss': 1.4422, 'grad_norm': 0.00444001291171894, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:47<12:21, 3.86s/it] 63%|██████▎ | 329/520 [20:51<12:07, 3.81s/it] {'loss': 1.2639, 'grad_norm': 0.003631153708238768, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:51<12:07, 3.81s/it] 63%|██████▎ | 330/520 [20:54<11:54, 3.76s/it] {'loss': 1.3443, 'grad_norm': 0.003881257426657212, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:54<11:54, 3.76s/it] 64%|██████▎ | 331/520 [20:58<11:44, 3.73s/it] {'loss': 1.3144, 'grad_norm': 0.004065642574624382, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:58<11:44, 3.73s/it] 64%|██████▍ | 332/520 [21:02<11:39, 3.72s/it] {'loss': 1.5646, 'grad_norm': 0.004385516660427347, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:02<11:39, 3.72s/it] 64%|██████▍ | 333/520 [21:05<11:33, 3.71s/it] {'loss': 1.5073, 'grad_norm': 0.004470877206899302, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:05<11:33, 3.71s/it] 64%|██████▍ | 334/520 [21:09<11:26, 3.69s/it] {'loss': 1.3537, 'grad_norm': 0.0045617080233201325, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:09<11:26, 3.69s/it] 64%|██████▍ | 335/520 [21:13<11:20, 3.68s/it] {'loss': 1.3549, 'grad_norm': 0.003910722420188998, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:13<11:20, 3.68s/it] 65%|██████▍ | 336/520 [21:16<11:19, 3.69s/it] {'loss': 1.2242, 'grad_norm': 0.004418691559269788, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:16<11:19, 3.69s/it] 65%|██████▍ | 337/520 [21:20<11:16, 3.70s/it] {'loss': 1.2343, 'grad_norm': 0.004095524459092046, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:20<11:16, 3.70s/it] 65%|██████▌ | 338/520 [21:24<11:12, 3.70s/it] {'loss': 1.3791, 'grad_norm': 0.004134597925613363, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:24<11:12, 3.70s/it] 65%|██████▌ | 339/520 [21:27<11:08, 3.69s/it] {'loss': 1.3105, 'grad_norm': 0.004150662387605905, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:27<11:08, 3.69s/it] 65%|██████▌ | 340/520 [21:31<11:05, 3.70s/it] {'loss': 1.2896, 'grad_norm': 0.00399395413898329, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:31<11:05, 3.70s/it] 66%|██████▌ | 341/520 [21:35<11:02, 3.70s/it] {'loss': 1.3225, 'grad_norm': 0.0042120933966446564, 'learning_rate': 0.056050870019535494, 
'epoch': 0.66} + 66%|██████▌ | 341/520 [21:35<11:02, 3.70s/it] 66%|██████▌ | 342/520 [21:39<10:57, 3.69s/it] {'loss': 1.534, 'grad_norm': 0.0050224040623214694, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:39<10:57, 3.69s/it] 66%|██████▌ | 343/520 [21:42<10:54, 3.70s/it] {'loss': 1.4994, 'grad_norm': 0.0044504130502635636, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:42<10:54, 3.70s/it] 66%|██████▌ | 344/520 [21:46<10:50, 3.69s/it] {'loss': 1.2668, 'grad_norm': 0.004129056819827316, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:46<10:50, 3.69s/it] 66%|██████▋ | 345/520 [21:50<10:44, 3.68s/it] {'loss': 1.3998, 'grad_norm': 0.004357059860304038, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:50<10:44, 3.68s/it] 67%|██████▋ | 346/520 [21:53<10:41, 3.68s/it] {'loss': 1.4705, 'grad_norm': 0.0042561157603856654, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:53<10:41, 3.68s/it] 67%|██████▋ | 347/520 [21:57<10:36, 3.68s/it] {'loss': 1.2786, 'grad_norm': 0.003913438306435109, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:57<10:36, 3.68s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:01<10:39, 3.72s/it] {'loss': 1.2532, 'grad_norm': 0.004806953547830382, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:01<10:39, 3.72s/it] 67%|██████▋ | 349/520 [22:05<10:41, 3.75s/it] {'loss': 1.2968, 'grad_norm': 0.004127874136989094, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:05<10:41, 3.75s/it] 67%|██████▋ | 350/520 [22:08<10:41, 3.77s/it] {'loss': 1.33, 'grad_norm': 0.004317420491050251, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:08<10:41, 3.77s/it] 68%|██████▊ | 351/520 [22:12<10:41, 3.79s/it] {'loss': 1.236, 'grad_norm': 0.0038449693078442433, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:12<10:41, 3.79s/it] 68%|██████▊ | 352/520 [22:16<10:40, 3.81s/it] {'loss': 1.3538, 'grad_norm': 0.0038223139665894356, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:16<10:40, 3.81s/it] 68%|██████▊ | 353/520 [22:20<10:34, 3.80s/it] {'loss': 1.4179, 'grad_norm': 0.003644682861905628, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:20<10:34, 3.80s/it] 68%|██████▊ | 354/520 [22:24<10:27, 3.78s/it] {'loss': 1.5817, 'grad_norm': 0.004239860551343252, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:24<10:27, 3.78s/it] 68%|██████▊ | 355/520 [22:27<10:20, 3.76s/it] {'loss': 1.2982, 'grad_norm': 0.003917356336260178, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:27<10:20, 3.76s/it] 68%|██████▊ | 356/520 [22:31<10:13, 3.74s/it] {'loss': 1.2904, 'grad_norm': 0.0041439927079132575, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:31<10:13, 3.74s/it] 69%|██████▊ | 357/520 [22:35<10:09, 3.74s/it] {'loss': 1.3009, 'grad_norm': 0.0036477596591329383, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:35<10:09, 3.74s/it] 69%|██████▉ | 358/520 [22:38<10:05, 3.74s/it] {'loss': 1.2398, 'grad_norm': 0.0039043183179865936, 'learning_rate': 
0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:38<10:05, 3.74s/it] 69%|██████▉ | 359/520 [22:42<10:04, 3.75s/it] {'loss': 1.4907, 'grad_norm': 0.0048460436792168516, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:42<10:04, 3.75s/it] 69%|██████▉ | 360/520 [22:46<10:01, 3.76s/it] {'loss': 1.5054, 'grad_norm': 0.004588133048332292, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:46<10:01, 3.76s/it] 69%|██████▉ | 361/520 [22:50<09:56, 3.75s/it] {'loss': 1.5003, 'grad_norm': 0.003999551243742659, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:50<09:56, 3.75s/it] 70%|██████▉ | 362/520 [22:53<09:49, 3.73s/it] {'loss': 1.2968, 'grad_norm': 0.004387197715049025, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:53<09:49, 3.73s/it] 70%|██████▉ | 363/520 [22:57<09:43, 3.72s/it] {'loss': 1.3423, 'grad_norm': 0.0037780421895270166, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:57<09:43, 3.72s/it] 70%|███████ | 364/520 [23:01<09:39, 3.71s/it] {'loss': 1.5108, 'grad_norm': 0.003972675441680533, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [23:01<09:39, 3.71s/it] 70%|███████ | 365/520 [23:05<09:36, 3.72s/it] {'loss': 1.407, 'grad_norm': 0.004156886059891095, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [23:05<09:36, 3.72s/it] 70%|███████ | 366/520 [23:08<09:32, 3.72s/it] {'loss': 1.3534, 'grad_norm': 0.0038237063589734856, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:08<09:32, 3.72s/it] 71%|███████ | 367/520 [23:12<09:28, 3.72s/it] {'loss': 1.3605, 'grad_norm': 0.003980637797861761, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:12<09:28, 3.72s/it] 71%|███████ | 368/520 [23:16<09:25, 3.72s/it] {'loss': 1.206, 'grad_norm': 0.0043152623288132165, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:16<09:25, 3.72s/it] 71%|███████ | 369/520 [23:19<09:21, 3.72s/it] {'loss': 1.4592, 'grad_norm': 0.004077617804738474, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:19<09:21, 3.72s/it] 71%|███████ | 370/520 [23:23<09:17, 3.72s/it] {'loss': 1.2596, 'grad_norm': 0.003958094856115144, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:23<09:17, 3.72s/it] 71%|███████▏ | 371/520 [23:27<09:14, 3.72s/it] {'loss': 1.2574, 'grad_norm': 0.00428535419020104, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:27<09:14, 3.72s/it] 72%|███████▏ | 372/520 [23:31<09:11, 3.72s/it] {'loss': 1.5708, 'grad_norm': 0.003858130265443928, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:31<09:11, 3.72s/it] 72%|███████▏ | 373/520 [23:34<09:10, 3.75s/it] {'loss': 1.4373, 'grad_norm': 0.004558108007819902, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:34<09:10, 3.75s/it] 72%|███████▏ | 374/520 [23:38<09:12, 3.79s/it] {'loss': 1.3407, 'grad_norm': 0.004211223848889737, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:38<09:12, 3.79s/it] 72%|███████▏ | 375/520 [23:42<09:12, 3.81s/it] {'loss': 1.239, 'grad_norm': 0.004225452776663543, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:42<09:12, 3.81s/it] 72%|███████▏ | 376/520 [23:46<09:11, 3.83s/it] {'loss': 1.373, 'grad_norm': 
0.0038344281568938846, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:46<09:11, 3.83s/it] 72%|███████▎ | 377/520 [23:50<09:10, 3.85s/it] {'loss': 1.3141, 'grad_norm': 0.0051748425960948825, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:50<09:10, 3.85s/it] 73%|███████▎ | 378/520 [23:54<09:07, 3.85s/it] {'loss': 1.3675, 'grad_norm': 0.0038613792776856274, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:54<09:07, 3.85s/it] 73%|███████▎ | 379/520 [23:58<09:06, 3.87s/it] {'loss': 1.3538, 'grad_norm': 0.003845023029286576, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:58<09:06, 3.87s/it] 73%|███████▎ | 380/520 [24:02<09:02, 3.87s/it] {'loss': 1.5592, 'grad_norm': 0.004920425477899893, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:02<09:02, 3.87s/it] 73%|███████▎ | 381/520 [24:06<09:01, 3.89s/it] {'loss': 1.3438, 'grad_norm': 0.0043084904274875, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:06<09:01, 3.89s/it] 73%|███████▎ | 382/520 [24:09<08:56, 3.89s/it] {'loss': 1.4729, 'grad_norm': 0.004174595347941862, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:09<08:56, 3.89s/it] 74%|███████▎ | 383/520 [24:13<08:51, 3.88s/it] {'loss': 1.1754, 'grad_norm': 0.004334813187341467, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:13<08:51, 3.88s/it] 74%|███████▍ | 384/520 [24:17<08:39, 3.82s/it] {'loss': 1.6608, 'grad_norm': 0.004188759061286612, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:17<08:39, 3.82s/it] 74%|███████▍ | 385/520 [24:21<08:31, 3.79s/it] {'loss': 1.3266, 'grad_norm': 0.003795108084151935, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:21<08:31, 3.79s/it] 74%|███████▍ | 386/520 [24:24<08:24, 3.76s/it] {'loss': 1.2547, 'grad_norm': 0.003521727420631429, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:24<08:24, 3.76s/it] 74%|███████▍ | 387/520 [24:28<08:17, 3.74s/it] {'loss': 1.5735, 'grad_norm': 0.004309900904005043, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:28<08:17, 3.74s/it] 75%|███████▍ | 388/520 [24:32<08:14, 3.75s/it] {'loss': 1.2164, 'grad_norm': 0.003880973442472429, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:32<08:14, 3.75s/it] 75%|███████▍ | 389/520 [24:36<08:09, 3.74s/it] {'loss': 1.2785, 'grad_norm': 0.004647949751348532, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:36<08:09, 3.74s/it] 75%|███████▌ | 390/520 [24:39<08:03, 3.72s/it] {'loss': 1.3383, 'grad_norm': 0.003673462840917388, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:39<08:03, 3.72s/it] 75%|███████▌ | 391/520 [24:43<07:58, 3.71s/it] {'loss': 1.4397, 'grad_norm': 0.004019905837589887, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:43<07:58, 3.71s/it] 75%|███████▌ | 392/520 [24:47<07:54, 3.71s/it] {'loss': 1.2289, 'grad_norm': 0.003848088961405763, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:47<07:54, 3.71s/it] 76%|███████▌ | 393/520 [24:50<07:56, 3.75s/it] {'loss': 1.337, 'grad_norm': 0.0037585912372672782, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:50<07:56, 3.75s/it] 
76%|███████▌ | 394/520 [24:54<07:56, 3.78s/it] {'loss': 1.2924, 'grad_norm': 0.004591771047889407, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:54<07:56, 3.78s/it] 76%|███████▌ | 395/520 [24:58<07:56, 3.81s/it] {'loss': 1.2514, 'grad_norm': 0.004600981441564858, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:58<07:56, 3.81s/it] 76%|███████▌ | 396/520 [25:02<07:53, 3.82s/it] {'loss': 1.3418, 'grad_norm': 0.004272416971532913, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:02<07:53, 3.82s/it] 76%|███████▋ | 397/520 [25:06<07:52, 3.84s/it] {'loss': 1.334, 'grad_norm': 0.003950929183113846, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:06<07:52, 3.84s/it] 77%|███████▋ | 398/520 [25:10<07:49, 3.85s/it] {'loss': 1.3169, 'grad_norm': 0.0041318925489418415, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:10<07:49, 3.85s/it] 77%|███████▋ | 399/520 [25:14<07:45, 3.85s/it] {'loss': 1.404, 'grad_norm': 0.004138272949816707, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:14<07:45, 3.85s/it] 77%|███████▋ | 400/520 [25:18<07:42, 3.85s/it] {'loss': 1.4627, 'grad_norm': 0.003956349465572958, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:18<07:42, 3.85s/it] 77%|███████▋ | 401/520 [25:21<07:39, 3.86s/it] {'loss': 1.113, 'grad_norm': 0.0039301497685081685, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:21<07:39, 3.86s/it] 77%|███████▋ | 402/520 [25:25<07:36, 3.86s/it] {'loss': 1.2552, 'grad_norm': 0.004264677827170357, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:25<07:36, 3.86s/it] 78%|███████▊ | 403/520 [25:29<07:31, 3.86s/it] {'loss': 1.2935, 'grad_norm': 0.00427333649648755, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:29<07:31, 3.86s/it] 78%|███████▊ | 404/520 [25:33<07:27, 3.86s/it] {'loss': 1.2025, 'grad_norm': 0.005388941882988711, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:33<07:27, 3.86s/it] 78%|███████▊ | 405/520 [25:37<07:25, 3.87s/it] {'loss': 1.4004, 'grad_norm': 0.004256545471460245, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:37<07:25, 3.87s/it] 78%|███████▊ | 406/520 [25:41<07:19, 3.86s/it] {'loss': 1.3404, 'grad_norm': 0.004849792282003421, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:41<07:19, 3.86s/it] 78%|███████▊ | 407/520 [25:44<07:13, 3.84s/it] {'loss': 1.3926, 'grad_norm': 0.003968088545431296, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:44<07:13, 3.84s/it] 78%|███████▊ | 408/520 [25:48<07:04, 3.79s/it] {'loss': 1.2656, 'grad_norm': 0.004181864905906568, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:48<07:04, 3.79s/it] 79%|███████▊ | 409/520 [25:52<06:58, 3.77s/it] {'loss': 1.4107, 'grad_norm': 0.004483305590063735, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:52<06:58, 3.77s/it] 79%|███████▉ | 410/520 [25:56<06:52, 3.75s/it] {'loss': 1.1125, 'grad_norm': 0.004482710040349119, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:56<06:52, 3.75s/it] 79%|███████▉ | 411/520 [25:59<06:45, 3.72s/it] {'loss': 1.3821, 'grad_norm': 0.0043057915625545, 'learning_rate': 
0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:59<06:45, 3.72s/it] 79%|███████▉ | 412/520 [26:03<06:40, 3.71s/it] {'loss': 1.2935, 'grad_norm': 0.0039050883047705934, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:03<06:40, 3.71s/it] 79%|███████▉ | 413/520 [26:07<06:36, 3.70s/it] {'loss': 1.4581, 'grad_norm': 0.003955121962596029, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:07<06:36, 3.70s/it] 80%|███████▉ | 414/520 [26:10<06:32, 3.70s/it] {'loss': 1.2084, 'grad_norm': 0.003575951243194012, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:10<06:32, 3.70s/it] 80%|███████▉ | 415/520 [26:14<06:29, 3.70s/it] {'loss': 1.2762, 'grad_norm': 0.003885090740223117, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:14<06:29, 3.70s/it] 80%|████████ | 416/520 [26:18<06:26, 3.71s/it] {'loss': 1.1707, 'grad_norm': 0.004657283256651519, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:18<06:26, 3.71s/it] 80%|████████ | 417/520 [26:21<06:23, 3.72s/it] {'loss': 1.3545, 'grad_norm': 0.004432770275699724, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:21<06:23, 3.72s/it] 80%|████████ | 418/520 [26:25<06:19, 3.72s/it] {'loss': 1.335, 'grad_norm': 0.0038592694535031715, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:25<06:19, 3.72s/it] 81%|████████ | 419/520 [26:29<06:15, 3.72s/it] {'loss': 1.321, 'grad_norm': 0.0041974514771246215, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:29<06:15, 3.72s/it] 81%|████████ | 420/520 [26:33<06:12, 3.72s/it] {'loss': 1.191, 'grad_norm': 0.004345324824721946, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:33<06:12, 3.72s/it] 81%|████████ | 421/520 [26:36<06:09, 3.73s/it] {'loss': 1.1252, 'grad_norm': 0.004201759729669283, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:36<06:09, 3.73s/it] 81%|████████ | 422/520 [26:40<06:05, 3.73s/it] {'loss': 1.2634, 'grad_norm': 0.004477986764434938, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:40<06:05, 3.73s/it] 81%|████████▏ | 423/520 [26:44<06:01, 3.73s/it] {'loss': 1.2589, 'grad_norm': 0.004668027649869905, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:44<06:01, 3.73s/it] 82%|████████▏ | 424/520 [26:48<05:59, 3.74s/it] {'loss': 1.5309, 'grad_norm': 0.004780848192980123, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:48<05:59, 3.74s/it] 82%|████████▏ | 425/520 [26:51<05:55, 3.74s/it] {'loss': 1.262, 'grad_norm': 0.004007467587304358, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:51<05:55, 3.74s/it] 82%|████████▏ | 426/520 [26:55<05:51, 3.74s/it] {'loss': 1.2825, 'grad_norm': 0.00551197849317997, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:55<05:51, 3.74s/it] 82%|████████▏ | 427/520 [26:59<05:45, 3.71s/it] {'loss': 1.1889, 'grad_norm': 0.004005526659681355, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:59<05:45, 3.71s/it] 82%|████████▏ | 428/520 [27:02<05:41, 3.71s/it] {'loss': 1.162, 'grad_norm': 0.004419434698121936, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:02<05:41, 3.71s/it] 82%|████████▎ | 429/520 
[27:06<05:39, 3.73s/it] {'loss': 1.263, 'grad_norm': 0.003797850071118386, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:06<05:39, 3.73s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:10<05:35, 3.73s/it] {'loss': 1.2605, 'grad_norm': 0.0035705127720253773, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:10<05:35, 3.73s/it] 83%|████████▎ | 431/520 [27:14<05:31, 3.72s/it] {'loss': 1.4122, 'grad_norm': 0.004270770707204546, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:14<05:31, 3.72s/it] 83%|████████▎ | 432/520 [27:17<05:28, 3.73s/it] {'loss': 1.1712, 'grad_norm': 0.004495762275327721, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:17<05:28, 3.73s/it] 83%|████████▎ | 433/520 [27:21<05:24, 3.72s/it] {'loss': 1.3124, 'grad_norm': 0.004055315974166465, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:21<05:24, 3.72s/it] 83%|████████▎ | 434/520 [27:25<05:21, 3.73s/it] {'loss': 1.0388, 'grad_norm': 0.0037520457557138143, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:25<05:21, 3.73s/it] 84%|████████▎ | 435/520 [27:29<05:16, 3.72s/it] {'loss': 1.352, 'grad_norm': 0.0041272047171957245, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:29<05:16, 3.72s/it] 84%|████████▍ | 436/520 [27:32<05:12, 3.72s/it] {'loss': 1.1342, 'grad_norm': 0.0038825961627259355, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:32<05:12, 3.72s/it] 84%|████████▍ | 437/520 [27:36<05:08, 3.72s/it] {'loss': 1.3846, 'grad_norm': 0.004049439900209677, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:36<05:08, 3.72s/it] 84%|████████▍ | 438/520 [27:40<05:04, 3.71s/it] {'loss': 1.1619, 'grad_norm': 0.0038960994929311383, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:40<05:04, 3.71s/it] 84%|████████▍ | 439/520 [27:43<05:00, 3.71s/it] {'loss': 1.3599, 'grad_norm': 0.0033728507442605258, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:43<05:00, 3.71s/it] 85%|████████▍ | 440/520 [27:47<04:56, 3.71s/it] {'loss': 1.2361, 'grad_norm': 0.00382876824090446, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:47<04:56, 3.71s/it] 85%|████████▍ | 441/520 [27:51<04:53, 3.71s/it] {'loss': 1.4166, 'grad_norm': 0.004237196912161237, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:51<04:53, 3.71s/it] 85%|████████▌ | 442/520 [27:55<04:50, 3.72s/it] {'loss': 1.2822, 'grad_norm': 0.004926615932263179, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:55<04:50, 3.72s/it] 85%|████████▌ | 443/520 [27:58<04:45, 3.71s/it] {'loss': 1.3013, 'grad_norm': 0.004049691658716893, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:58<04:45, 3.71s/it] 85%|████████▌ | 444/520 [28:02<04:42, 3.72s/it] {'loss': 1.2708, 'grad_norm': 0.0034703677870288174, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:02<04:42, 3.72s/it] 86%|████████▌ | 445/520 [28:06<04:38, 3.72s/it] {'loss': 1.187, 'grad_norm': 0.004011533457770011, 'learning_rate': 
0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:06<04:38, 3.72s/it] 86%|████████▌ | 446/520 [28:09<04:35, 3.72s/it] {'loss': 1.4797, 'grad_norm': 0.003864452579259735, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:09<04:35, 3.72s/it] 86%|████████▌ | 447/520 [28:13<04:31, 3.72s/it] {'loss': 1.2925, 'grad_norm': 0.004311774901797713, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:13<04:31, 3.72s/it] 86%|████████▌ | 448/520 [28:17<04:28, 3.72s/it] {'loss': 1.2523, 'grad_norm': 0.0038179896171880805, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:17<04:28, 3.72s/it] 86%|████████▋ | 449/520 [28:21<04:23, 3.71s/it] {'loss': 1.4336, 'grad_norm': 0.0043408701553043876, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:21<04:23, 3.71s/it] 87%|████████▋ | 450/520 [28:24<04:19, 3.71s/it] {'loss': 1.3109, 'grad_norm': 0.003943460643657138, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:24<04:19, 3.71s/it] 87%|████████▋ | 451/520 [28:28<04:15, 3.71s/it] {'loss': 1.2992, 'grad_norm': 0.004323185492144426, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:28<04:15, 3.71s/it] 87%|████████▋ | 452/520 [28:32<04:12, 3.71s/it] {'loss': 1.4772, 'grad_norm': 0.004178035838891773, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:32<04:12, 3.71s/it] 87%|████████▋ | 453/520 [28:35<04:08, 3.71s/it] {'loss': 1.4365, 'grad_norm': 0.004040025023652321, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:35<04:08, 3.71s/it] 87%|████████▋ | 454/520 [28:39<04:04, 3.71s/it] {'loss': 1.2102, 'grad_norm': 0.00415871643386988, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:39<04:04, 3.71s/it] 88%|████████▊ | 455/520 [28:43<04:00, 3.71s/it] {'loss': 1.3513, 'grad_norm': 0.003992931498493516, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:43<04:00, 3.71s/it] 88%|████████▊ | 456/520 [28:47<03:57, 3.72s/it] {'loss': 1.2548, 'grad_norm': 0.004007068423285441, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:47<03:57, 3.72s/it] 88%|████████▊ | 457/520 [28:50<03:55, 3.74s/it] {'loss': 1.488, 'grad_norm': 0.0038720751052961746, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:50<03:55, 3.74s/it] 88%|████████▊ | 458/520 [28:54<03:55, 3.80s/it] {'loss': 1.4109, 'grad_norm': 0.004224746142685796, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:54<03:55, 3.80s/it] 88%|████████▊ | 459/520 [28:58<03:54, 3.84s/it] {'loss': 1.3293, 'grad_norm': 0.003746419333876775, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:58<03:54, 3.84s/it] 88%|████████▊ | 460/520 [29:02<03:51, 3.86s/it] {'loss': 1.1931, 'grad_norm': 0.0038452810610551877, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:02<03:51, 3.86s/it] 89%|████████▊ | 461/520 [29:06<03:49, 3.88s/it] {'loss': 1.5541, 'grad_norm': 0.0036996866396671883, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:06<03:49, 3.88s/it] 89%|████████▉ | 462/520 [29:10<03:45, 3.89s/it] {'loss': 1.5226, 'grad_norm': 0.00407197882327882, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:10<03:45, 
3.89s/it] 89%|████████▉ | 463/520 [29:14<03:41, 3.89s/it] {'loss': 1.1594, 'grad_norm': 0.004211123157914977, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:14<03:41, 3.89s/it] 89%|████████▉ | 464/520 [29:18<03:38, 3.90s/it] {'loss': 1.3278, 'grad_norm': 0.003995807877520006, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:18<03:38, 3.90s/it] 89%|████████▉ | 465/520 [29:22<03:34, 3.90s/it] {'loss': 1.4418, 'grad_norm': 0.004682388027075611, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:22<03:34, 3.90s/it] 90%|████████▉ | 466/520 [29:26<03:31, 3.91s/it] {'loss': 1.3005, 'grad_norm': 0.003467359001723457, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:26<03:31, 3.91s/it] 90%|████████▉ | 467/520 [29:30<03:27, 3.91s/it] {'loss': 1.3885, 'grad_norm': 0.004024736104882356, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:30<03:27, 3.91s/it] 90%|█████████ | 468/520 [29:33<03:20, 3.86s/it] {'loss': 1.2868, 'grad_norm': 0.004645209722880851, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:33<03:20, 3.86s/it] 90%|█████████ | 469/520 [29:37<03:15, 3.84s/it] {'loss': 1.3466, 'grad_norm': 0.004235254709145771, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:37<03:15, 3.84s/it] 90%|█████████ | 470/520 [29:41<03:10, 3.82s/it] {'loss': 1.2082, 'grad_norm': 0.0036435467453629308, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:41<03:10, 3.82s/it] 91%|█████████ | 471/520 [29:45<03:05, 3.78s/it] {'loss': 1.2317, 'grad_norm': 0.0042355819036151345, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:45<03:05, 3.78s/it] 91%|█████████ | 472/520 [29:48<03:00, 3.77s/it] {'loss': 1.2008, 'grad_norm': 0.003808982868461537, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:48<03:00, 3.77s/it] 91%|█████████ | 473/520 [29:52<02:57, 3.78s/it] {'loss': 1.2484, 'grad_norm': 0.00402449811055837, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:52<02:57, 3.78s/it] 91%|█████████ | 474/520 [29:56<02:53, 3.77s/it] {'loss': 1.424, 'grad_norm': 0.003830285491847169, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:56<02:53, 3.77s/it] 91%|█████████▏| 475/520 [30:00<02:50, 3.78s/it] {'loss': 1.3415, 'grad_norm': 0.003845171074239067, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:00<02:50, 3.78s/it] 92%|█████████▏| 476/520 [30:03<02:45, 3.75s/it] {'loss': 1.2488, 'grad_norm': 0.004509731865078635, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:03<02:45, 3.75s/it] 92%|█████████▏| 477/520 [30:07<02:40, 3.74s/it] {'loss': 1.24, 'grad_norm': 0.004839613500235923, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:07<02:40, 3.74s/it] 92%|█████████▏| 478/520 [30:11<02:36, 3.73s/it] {'loss': 1.2004, 'grad_norm': 0.003974472624572657, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:11<02:36, 3.73s/it] 92%|█████████▏| 479/520 [30:14<02:33, 3.73s/it] {'loss': 1.4176, 'grad_norm': 0.004702668329135087, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:14<02:33, 3.73s/it] 92%|█████████▏| 480/520 [30:18<02:30, 3.77s/it] {'loss': 1.4173, 'grad_norm': 
0.0038196012332568146, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:18<02:30, 3.77s/it] 92%|█████████▎| 481/520 [30:22<02:27, 3.78s/it] {'loss': 1.4502, 'grad_norm': 0.0037760336605844147, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:22<02:27, 3.78s/it] 93%|█████████▎| 482/520 [30:26<02:24, 3.79s/it] {'loss': 1.4396, 'grad_norm': 0.00383338135191916, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:26<02:24, 3.79s/it] 93%|█████████▎| 483/520 [30:30<02:19, 3.78s/it] {'loss': 1.2802, 'grad_norm': 0.004272732588542076, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:30<02:19, 3.78s/it] 93%|█████████▎| 484/520 [30:33<02:15, 3.75s/it] {'loss': 1.2751, 'grad_norm': 0.00402856545430907, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:33<02:15, 3.75s/it] 93%|█████████▎| 485/520 [30:37<02:10, 3.74s/it] {'loss': 1.2158, 'grad_norm': 0.0038113360062052874, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:37<02:10, 3.74s/it] 93%|█████████▎| 486/520 [30:41<02:07, 3.75s/it] {'loss': 1.3469, 'grad_norm': 0.00417916087183019, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:41<02:07, 3.75s/it] 94%|█████████▎| 487/520 [30:45<02:02, 3.73s/it] {'loss': 1.1977, 'grad_norm': 0.004055542930977069, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:45<02:02, 3.73s/it] 94%|█████████▍| 488/520 [30:48<01:58, 3.71s/it] {'loss': 1.1259, 'grad_norm': 0.004040345078543012, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:48<01:58, 3.71s/it] 94%|█████████▍| 489/520 [30:52<01:54, 3.70s/it] {'loss': 1.4267, 'grad_norm': 0.003668706816598015, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:52<01:54, 3.70s/it] 94%|█████████▍| 490/520 [30:56<01:50, 3.70s/it] {'loss': 1.2636, 'grad_norm': 0.004158064842810167, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:56<01:50, 3.70s/it] 94%|█████████▍| 491/520 [30:59<01:47, 3.69s/it] {'loss': 1.218, 'grad_norm': 0.004008303120176962, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:59<01:47, 3.69s/it] 95%|█████████▍| 492/520 [31:03<01:43, 3.69s/it] {'loss': 1.3558, 'grad_norm': 0.004825540769472819, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:03<01:43, 3.69s/it] 95%|█████████▍| 493/520 [31:07<01:39, 3.70s/it] {'loss': 1.4967, 'grad_norm': 0.004281844254557577, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:07<01:39, 3.70s/it] 95%|█████████▌| 494/520 [31:10<01:36, 3.70s/it] {'loss': 1.2919, 'grad_norm': 0.0037171543296423137, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:10<01:36, 3.70s/it] 95%|█████████▌| 495/520 [31:14<01:32, 3.69s/it] {'loss': 1.2218, 'grad_norm': 0.0038773601151656225, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:14<01:32, 3.69s/it] 95%|█████████▌| 496/520 [31:18<01:28, 3.70s/it] {'loss': 1.162, 'grad_norm': 0.003908627864833279, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:18<01:28, 3.70s/it] 96%|█████████▌| 497/520 [31:21<01:25, 3.70s/it] {'loss': 1.3525, 'grad_norm': 0.0035563359660622166, 'learning_rate': 0.0010259361921774012, 'epoch': 
0.96} + 96%|█████████▌| 497/520 [31:21<01:25, 3.70s/it] 96%|█████████▌| 498/520 [31:25<01:21, 3.70s/it] {'loss': 1.2361, 'grad_norm': 0.004166893578635473, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:25<01:21, 3.70s/it] 96%|█████████▌| 499/520 [31:29<01:17, 3.70s/it] {'loss': 1.5072, 'grad_norm': 0.004013166212053632, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:29<01:17, 3.70s/it] 96%|█████████▌| 500/520 [31:33<01:14, 3.71s/it] {'loss': 1.3654, 'grad_norm': 0.004635634668333776, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:33<01:14, 3.71s/it] 96%|█████████▋| 501/520 [31:37<01:11, 3.78s/it] {'loss': 1.4277, 'grad_norm': 0.005209109168354272, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:37<01:11, 3.78s/it] 97%|█████████▋| 502/520 [31:40<01:08, 3.82s/it] {'loss': 1.2683, 'grad_norm': 0.003806869371633472, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:40<01:08, 3.82s/it] 97%|█████████▋| 503/520 [31:44<01:05, 3.85s/it] {'loss': 1.3925, 'grad_norm': 0.0041418068964444825, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:44<01:05, 3.85s/it] 97%|█████████▋| 504/520 [31:48<01:02, 3.88s/it] {'loss': 1.2819, 'grad_norm': 0.004493017769600147, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:48<01:02, 3.88s/it] 97%|█████████▋| 505/520 [31:52<00:58, 3.90s/it] {'loss': 1.3208, 'grad_norm': 0.003976992958502504, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:52<00:58, 3.90s/it] 97%|█████████▋| 506/520 [31:56<00:54, 3.92s/it] {'loss': 1.2301, 'grad_norm': 0.004410207746082429, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:56<00:54, 3.92s/it] 98%|█████████▊| 507/520 [32:00<00:51, 3.93s/it] {'loss': 1.5522, 'grad_norm': 0.0039148603811008705, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:00<00:51, 3.93s/it] 98%|█████████▊| 508/520 [32:04<00:47, 3.94s/it] {'loss': 1.3564, 'grad_norm': 0.004154708381415211, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:04<00:47, 3.94s/it] 98%|█████████▊| 509/520 [32:08<00:43, 3.94s/it] {'loss': 1.3131, 'grad_norm': 0.0038013880826724407, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:08<00:43, 3.94s/it] 98%|█████████▊| 510/520 [32:12<00:39, 3.94s/it] {'loss': 1.2663, 'grad_norm': 0.0039031595314977293, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:12<00:39, 3.94s/it] 98%|█████████▊| 511/520 [32:16<00:35, 3.93s/it] {'loss': 1.2409, 'grad_norm': 0.0036741164753264603, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:16<00:35, 3.93s/it] 98%|█████████▊| 512/520 [32:20<00:31, 3.93s/it] {'loss': 1.1281, 'grad_norm': 0.0038488422221525987, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:20<00:31, 3.93s/it] 99%|█████████▊| 513/520 [32:24<00:27, 3.91s/it] {'loss': 1.3301, 'grad_norm': 0.004912690866627133, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:24<00:27, 3.91s/it] 99%|█████████▉| 514/520 [32:28<00:23, 3.91s/it] {'loss': 1.3142, 'grad_norm': 0.003569364435509242, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:28<00:23, 3.91s/it] 
99%|█████████▉| 515/520 [32:32<00:19, 3.91s/it] {'loss': 1.3708, 'grad_norm': 0.004602381114831822, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:32<00:19, 3.91s/it] 99%|█████████▉| 516/520 [32:35<00:15, 3.91s/it] {'loss': 1.2298, 'grad_norm': 0.0038960626083883675, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:35<00:15, 3.91s/it] 99%|█████████▉| 517/520 [32:39<00:11, 3.90s/it] {'loss': 1.4637, 'grad_norm': 0.003943596289219283, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:39<00:11, 3.90s/it] 100%|█████████▉| 518/520 [32:43<00:07, 3.88s/it] {'loss': 1.2756, 'grad_norm': 0.004147880375471869, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:43<00:07, 3.88s/it] 100%|█████████▉| 519/520 [32:47<00:03, 3.88s/it] {'loss': 1.4012, 'grad_norm': 0.003922986399766949, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:47<00:03, 3.88s/it] 100%|██████████| 520/520 [32:52<00:00, 4.14s/it] {'loss': 1.4977, 'grad_norm': 0.004237581329132448, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:52<00:00, 4.14s/it] {'train_runtime': 1972.3347, 'train_samples_per_second': 33.731, 'train_steps_per_second': 0.264, 'train_loss': 1.5323313859792855, 'epoch': 1.0} + 100%|██████████| 520/520 [32:52<00:00, 4.14s/it] 100%|██████████| 520/520 [32:52<00:00, 3.79s/it] +[2025-10-10 08:05:48,522] [INFO] [launch.py:348:main] Process 619198 exits successfully. +[2025-10-10 08:05:49,523] [INFO] [launch.py:348:main] Process 619194 exits successfully. +[2025-10-10 08:05:49,524] [INFO] [launch.py:348:main] Process 619197 exits successfully. +[2025-10-10 08:05:49,524] [INFO] [launch.py:348:main] Process 619196 exits successfully. +[2025-10-10 08:05:49,525] [INFO] [launch.py:348:main] Process 619200 exits successfully. +[2025-10-10 08:05:49,525] [INFO] [launch.py:348:main] Process 619199 exits successfully. +[2025-10-10 08:05:50,527] [INFO] [launch.py:348:main] Process 619195 exits successfully. +[2025-10-10 08:05:53,531] [INFO] [launch.py:348:main] Process 619193 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.7_2e-1_connector-1.0_0.7_2e-1_ablation_20251010_072425.log +Timestamp: 2025-10-10 08:05:55 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_060544.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_060544.log new file mode 100644 index 0000000000000000000000000000000000000000..664e4ebc96038ffb9ac251aa4ea65ffaeab22121 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_060544.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_060544.log +Timestamp: 2025-10-10 06:05:44 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:05:46,988] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:05:49,724] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:05:49,725] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.9 --temperature_mlp_text 0.9 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.9 --temperature_mlp_vision 0.9 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.9 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:05:52,291] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:05:53,338] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:05:53,338] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:05:53,338] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:05:53,339] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:05:53,339] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:05:53,339] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:05:53,339] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:05:53,341] [INFO] [launch.py:253:main] process 560830 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:05:53,343] [INFO] [launch.py:253:main] process 560831 spawned with 
command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:05:53,345] [INFO] [launch.py:253:main] process 560832 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:05:53,347] [INFO] [launch.py:253:main] process 560833 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', 
'--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:05:53,349] [INFO] [launch.py:253:main] process 560834 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:05:53,351] [INFO] [launch.py:253:main] process 560835 spawned with command: 
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:05:53,353] [INFO] [launch.py:253:main] process 560836 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', 
'--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:05:53,354] [INFO] [launch.py:253:main] process 560837 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', 
'--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:05:59,975] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:00,138] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:00,141] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:00,183] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:00,193] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:00,194] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:00,229] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:00,231] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:00,368] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:06:00,535] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:06:00,537] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:06:00,537] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 06:06:00,590] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:06:00,596] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:06:00,598] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:06:00,621] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:06:00,625] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.9, 'temperature_mlp': 0.9, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.9, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.9,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.9,
+    "temperature_mlp": 0.9,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:560830:560830 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560830:560830 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:560830:560830 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:560830:560830 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:560830:560830 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:560830:560830 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test2-worker-0:560833:560833 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:560833:560833 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560833:560833 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:560836:560836 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:560833:560833 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:560836:560836 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560833:560833 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:560833:560833 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:560836:560836 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:560836:560836 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:560836:560836 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:560836:560836 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Using network Socket
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:560834:560834 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:560834:560834 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:560834:560834 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:560834:560834 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:560834:560834 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:560834:560834 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:560837:560837 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:560837:560837 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560837:560837 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560837:560837 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:560837:560837 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:560837:560837 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:560832:560832 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:560832:560832 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560832:560832 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560831:560831 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:560831:560831 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560831:560831 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560832:560832 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:560832:560832 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:560832:560832 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:560831:560831 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:560831:560831 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:560831:560831 [1] NCCL INFO NET/Plugin: Using internal network plugin. 
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:560835:560835 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:560835:560835 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560835:560835 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560835:560835 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:560835:560835 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:560835:560835 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO ncclCommInitRank comm 0x560df2f3ee50 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xf2e66213725fb91a - Init START +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO ncclCommInitRank comm 0x560f20f65e20 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xf2e66213725fb91a - Init START +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO ncclCommInitRank comm 0x563a804a1150 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xf2e66213725fb91a - Init START +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO ncclCommInitRank comm 0x555fe43b4680 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xf2e66213725fb91a - Init START +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO ncclCommInitRank comm 0x5604f6b237b0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xf2e66213725fb91a - Init START +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO ncclCommInitRank comm 0x55754519d6f0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xf2e66213725fb91a - Init START +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO ncclCommInitRank comm 0x55e6ca5e5060 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xf2e66213725fb91a - Init START +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO ncclCommInitRank comm 0x55b7372a2c80 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xf2e66213725fb91a - Init START +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO NVLS multicast support is not available on 
dev 7 +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO comm 0x563a804a1150 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO comm 0x55e6ca5e5060 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO comm 0x5604f6b237b0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO comm 0x55b7372a2c80 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO comm 0x55754519d6f0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO comm 0x560df2f3ee50 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO comm 0x555fe43b4680 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO comm 0x560f20f65e20 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO P2P Chunksize set to 524288 
+ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] 
via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Connected all rings 
+ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:560832:562496 [2] NCCL INFO ncclCommInitRank comm 0x563a804a1150 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xf2e66213725fb91a - Init COMPLETE +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:560834:562494 [4] NCCL INFO ncclCommInitRank comm 0x555fe43b4680 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xf2e66213725fb91a - Init COMPLETE +ywang29-vrdb-test2-worker-0:560836:562487 [6] NCCL INFO ncclCommInitRank comm 0x5604f6b237b0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xf2e66213725fb91a - Init COMPLETE +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:560830:562482 [0] NCCL INFO ncclCommInitRank comm 0x55754519d6f0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xf2e66213725fb91a - Init COMPLETE +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:560833:562488 [3] NCCL INFO ncclCommInitRank comm 0x560f20f65e20 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xf2e66213725fb91a - Init COMPLETE +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:560837:562495 [7] NCCL INFO ncclCommInitRank comm 0x55b7372a2c80 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xf2e66213725fb91a - Init COMPLETE +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:560835:562498 [5] NCCL INFO ncclCommInitRank comm 0x560df2f3ee50 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xf2e66213725fb91a - Init COMPLETE +ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:560831:562497 [1] NCCL INFO ncclCommInitRank comm 0x55e6ca5e5060 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xf2e66213725fb91a - Init COMPLETE +[2025-10-10 06:06:45,167] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores',
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 06:06:46,858] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from
/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, 
elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000 +Pre-training 
init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000
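Throughout the printed architecture, every nn.Linear in the language model and connector has been swapped for a SupermaskLinearSparsity_SoftForward_Normal module, and the Mean=1.000000 lines above confirm that each module's scores tensor starts at the configured --init_mean_* value of 1.0. The log does not include the layer's implementation; the following is only a minimal sketch of how such a soft-forward supermask layer could work. The class name SoftSupermaskLinear and the exact masking formula are assumptions inferred from the --mask_type_* soft and --temperature_* flags, not code from the repository.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftSupermaskLinear(nn.Linear):
        """Hypothetical sketch of a soft-forward supermask linear layer.

        A learnable `scores` tensor, shaped like `weight`, is pushed through
        a temperature-scaled sigmoid and multiplied onto the weights, so the
        optimizer can learn a soft subnetwork mask over pretrained weights.
        """

        def __init__(self, in_features, out_features, bias=True,
                     init_mean=1.0, temperature=0.3):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # Matches the "Pre-training init ...scores: Mean=1.000000" lines:
            # scores start at the configured --init_mean_* value.
            self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

        def forward(self, x):
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)

Under these assumptions, scores initialized at 1.0 with temperature 0.3 give an initial mask of sigmoid(1.0 / 0.3) ≈ 0.97 per weight, so training starts close to the unmasked network.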
"r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' +Pre-training init connector._connector.0.scores: Mean=1.000005 +Pre-training init connector._connector.2.scores: Mean=0.999970 +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' +[2025-10-10 06:06:49,417] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560830 +[2025-10-10 06:06:49,511] [INFO] [launch.py:316:sigkill_handler] 
+[2025-10-10 06:06:49,417] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560830
+[2025-10-10 06:06:49,511] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560831
+[2025-10-10 06:06:49,511] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560832
+[2025-10-10 06:06:49,965] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560833
+[2025-10-10 06:06:49,966] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560834
+[2025-10-10 06:06:49,967] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560835
+[2025-10-10 06:06:49,968] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560836
+[2025-10-10 06:06:49,969] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 560837
+[2025-10-10 06:06:49,970] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_060544.log
+Timestamp: 2025-10-10 06:06:51
+=====================================
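The next log file is the retry of the same ablation, launched two hours later with --data_path and --image_folder moved from /root/dataset to /s3-code/ywang29/datasets/tinyllava, which addresses the FileNotFoundError above. When comparing failed and retried runs like these, a small helper can diff the launcher argv lists directly; diff_flags below is a hypothetical utility, and it assumes each --flag token is followed by exactly one value, as in these commands (tokens like '--local_rank=7' that embed their value are skipped).

    def parse_flags(argv):
        """Map '--flag value' pairs in a launcher argv to a dict."""
        flags = {}
        for i, tok in enumerate(argv[:-1]):
            if tok.startswith("--") and "=" not in tok:
                flags[tok] = argv[i + 1]
        return flags

    def diff_flags(old_argv, new_argv):
        """Return {flag: (old_value, new_value)} for flags that changed."""
        old, new = parse_flags(old_argv), parse_flags(new_argv)
        return {flag: (old.get(flag), new.get(flag))
                for flag in sorted(set(old) | set(new))
                if old.get(flag) != new.get(flag)}

Applied to the failed command above and the retry below, this would surface --data_path and --image_folder as the only substantive changes; the mask temperatures already match at 0.9 in both.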
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_080556.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_080556.log
new file mode 100644
index 0000000000000000000000000000000000000000..bc2cb5efca90fdd1d8b8875cc4ad1b555f899f5c
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_080556.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_080556.log
+Timestamp: 2025-10-10 08:05:56
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 08:05:58,681] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:06:01,400] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-10 08:06:01,401] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0.
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 0.9 --temperature_mlp_text 0.9 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 0.9 --temperature_mlp_vision 0.9 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 0.9 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 08:06:03,980] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:05,016] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 08:06:05,016] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 08:06:05,016] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 08:06:05,016] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 08:06:05,016] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 08:06:05,016] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 08:06:05,016] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 08:06:05,018] [INFO] [launch.py:253:main] process 684530 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', 
'--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:06:05,020] [INFO] [launch.py:253:main] process 684531 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', 
'--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:06:05,022] [INFO] [launch.py:253:main] process 684532 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:06:05,024] [INFO] [launch.py:253:main] process 684533 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', 
'--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:06:05,026] [INFO] [launch.py:253:main] process 684534 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', 
'--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:06:05,028] [INFO] [launch.py:253:main] process 684535 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', 
'1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:06:05,030] [INFO] [launch.py:253:main] process 684536 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:06:05,032] [INFO] [launch.py:253:main] process 684537 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', 
'/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '0.9', '--temperature_mlp_text', '0.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '0.9', '--temperature_mlp_vision', '0.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '0.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 08:06:11,773] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:11,816] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:11,925] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:11,971] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:11,971] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:11,971] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:11,980] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:11,983] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:06:12,172] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:06:12,218] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:06:12,334] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:06:12,377] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:06:12,377] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 08:06:12,378] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:06:12,379] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:06:12,397] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:06:12,407] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.9, 'temperature_mlp': 0.9, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.9, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.9, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.9, + "temperature_mlp": 0.9, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. 
Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:684530:684530 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684530:684530 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684530:684530 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:684530:684530 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:684530:684530 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:684530:684530 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test2-worker-0:684536:684536 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:684536:684536 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684536:684536 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684536:684536 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:684536:684536 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:684536:684536 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:684534:684534 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:684534:684534 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684534:684534 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684534:684534 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:684534:684534 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:684534:684534 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:684537:684537 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:684537:684537 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684537:684537 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684537:684537 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:684537:684537 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:684537:684537 [7] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test2-worker-0:684531:684531 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:684531:684531 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684532:684532 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:684531:684531 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684532:684532 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684532:684532 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684531:684531 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:684531:684531 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:684531:684531 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:684532:684532 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:684532:684532 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:684532:684532 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:684535:684535 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:684535:684535 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684535:684535 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684535:684535 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:684535:684535 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:684535:684535 [5] NCCL INFO NET/Plugin: Using internal network plugin. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test2-worker-0:684533:684533 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:684533:684533 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684533:684533 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684533:684533 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:684533:684533 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:684533:684533 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO ncclCommInitRank comm 0x561ca83a7cc0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x99b95608df1ddb7e - Init START +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO ncclCommInitRank comm 0x5635294372a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x99b95608df1ddb7e - Init START +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO ncclCommInitRank comm 0x55d40f080f10 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x99b95608df1ddb7e - Init START +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO ncclCommInitRank comm 0x55e537492470 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x99b95608df1ddb7e - Init START +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO ncclCommInitRank comm 0x55f2f4ac8920 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x99b95608df1ddb7e - Init START +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO ncclCommInitRank comm 0x558ea23267e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x99b95608df1ddb7e - Init START +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO ncclCommInitRank comm 0x55ef5d734da0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x99b95608df1ddb7e - Init START +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO ncclCommInitRank comm 0x5610508c4930 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x99b95608df1ddb7e - Init START +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Setting affinity for GPU 7 to 
ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO comm 0x55ef5d734da0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO comm 0x561ca83a7cc0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO comm 0x558ea23267e0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO comm 0x55e537492470 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO comm 0x55f2f4ac8920 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO comm 0x55d40f080f10 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO comm 0x5610508c4930 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO comm 0x5635294372a0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 
[16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL 
INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] 
via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+[... ~330 repetitive NCCL INFO lines condensed: on ywang29-vrdb-test2-worker-0, each GPU pair connects channels 00/0 through 23/0 around the 8-GPU ring (0[0] -> 1[1] -> ... -> 7[7] -> 0[0]) and back (7[7] -> 6[6] -> ... -> 1[1] -> 0[0]) via P2P/CUMEM/read ...]
+ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[... identical "Connected all rings", "Connected all trees", threadThresholds, and channel-count lines repeated for ranks 1-7 elided ...]
+[... per-rank TUNER/Plugin lines condensed: each of ranks 0-7 logs "TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so" followed by "TUNER/Plugin: Using internal tuner plugin." ...]
+ywang29-vrdb-test2-worker-0:684536:686166 [6] NCCL INFO ncclCommInitRank comm 0x55ef5d734da0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x99b95608df1ddb7e - Init COMPLETE
+ywang29-vrdb-test2-worker-0:684535:686171 [5] NCCL INFO ncclCommInitRank comm 0x55f2f4ac8920 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x99b95608df1ddb7e - Init COMPLETE
+ywang29-vrdb-test2-worker-0:684534:686167 [4] NCCL INFO ncclCommInitRank comm 0x55e537492470 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x99b95608df1ddb7e - Init COMPLETE
+ywang29-vrdb-test2-worker-0:684537:686168 [7] NCCL INFO ncclCommInitRank comm 0x55d40f080f10 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x99b95608df1ddb7e - Init COMPLETE
+ywang29-vrdb-test2-worker-0:684532:686170 [2] NCCL INFO ncclCommInitRank comm 0x558ea23267e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x99b95608df1ddb7e - Init COMPLETE +ywang29-vrdb-test2-worker-0:684531:686169 [1] NCCL INFO ncclCommInitRank comm 0x5635294372a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x99b95608df1ddb7e - Init COMPLETE +ywang29-vrdb-test2-worker-0:684533:686172 [3] NCCL INFO ncclCommInitRank comm 0x561ca83a7cc0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x99b95608df1ddb7e - Init COMPLETE +ywang29-vrdb-test2-worker-0:684530:686165 [0] NCCL INFO ncclCommInitRank comm 0x5610508c4930 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x99b95608df1ddb7e - Init COMPLETE +[2025-10-10 08:06:51,672] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[... one verbatim repeat of the "Some weights of Qwen2ForCausalLM were not initialized ..." / "You should probably TRAIN this model ..." warning above, emitted by another rank, elided ...]
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[... two further verbatim repeats of the same warning from other ranks elided ...]
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', ... same per-layer .scores list as above ... 'model.layers.3.self_attn.o_proj.scores',
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 
'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 
'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 08:07:45,742] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
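The "newly initialized" warning above is expected for this mask-tuning run: each projection layer carries an extra scores tensor that has no counterpart in the pretrained checkpoint, so from_pretrained can only leave it at its fresh initialization and flags it. A minimal sketch of the mechanism, with hypothetical names (the project's own class, SupermaskLinearSparsity_SoftForward_Normal, appears in the module dump below):

import torch
import torch.nn as nn

class ScoredLinear(nn.Linear):
    """Hypothetical sketch: nn.Linear plus a per-weight 'scores' parameter.

    Checkpoints saved from the vanilla model contain no '<name>.scores'
    keys, so Transformers reports every scores tensor as newly
    initialized when the pretrained weights are loaded.
    """
    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 init_mean: float = 1.0):
        super().__init__(in_features, out_features, bias=bias)
        # Matches --init_mean_text 1.0 and the later
        # "Pre-training init ... Mean=1.000000" sanity checks in this log.
        self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))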
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
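In the dump above, every attention/MLP projection in the language model and both connector layers is a SupermaskLinearSparsity_SoftForward_Normal, while the frozen SigLIP tower keeps plain Linear layers, consistent with --mask_model llm-connector. Going by the flags --mask_type_* soft, --temperature_* 0.3 and --backward_type_* normal, a plausible reading of the soft forward is a sigmoid gate over the scores; the sketch below assumes that reading and is not the project's actual implementation:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Assumed semantics of a soft-forward supermask layer.

    Effective weight = weight * sigmoid(scores / temperature). With
    temperature 0.3 and scores initialized near 1.0, the initial gate is
    sigmoid(1 / 0.3) ~= 0.97, so training starts close to the dense
    pretrained model and the mask sharpens as the scores move.
    """
    def __init__(self, in_features: int, out_features: int, bias: bool = True,
                 temperature: float = 0.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        self.scores = nn.Parameter(torch.ones(out_features, in_features))
        # Freezing the base weight matches the trainable-parameter list
        # later in this log, where only '.scores' tensors appear.
        self.weight.requires_grad_(False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        gate = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * gate, self.bias)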
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000
+Pre-training init connector._connector.0.scores: Mean=1.000005
+Pre-training init connector._connector.2.scores: Mean=0.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-10 08:08:03,982 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-10 08:08:03,992 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters 
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters 
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2]
2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 
4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
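Stepping back to the trainable-parameter listing above: every trainable tensor is a mask "scores" tensor with one entry per weight of the masked projection (the "Pre-training init ... Mean=1.000000" lines show these scores starting at a mean near 1.0). The per-module counts are exactly the projection shapes of Qwen2.5-0.5B plus the two mlp2x_gelu connector layers. A minimal sketch re-deriving the logged total, with all shapes assumed from the public Qwen/Qwen2.5-0.5B and google/siglip-so400m-patch14-384 configs rather than read from this log:

```python
# Re-derive the trainable "scores" counts logged above (one score per weight
# entry). Shapes are assumptions from the public model configs, not from the log:
# Qwen2.5-0.5B: hidden 896, kv dim 128 (2 kv-heads x head_dim 64), MLP inter 4864;
# SigLIP so400m: 1152-dim features fed to a 2-layer GELU connector.
hidden, kv_dim, inter, vision_dim, n_layers = 896, 128, 4864, 1152, 24

per_layer = (
    hidden * hidden      # q_proj    -> 802816
    + kv_dim * hidden    # k_proj    -> 114688
    + kv_dim * hidden    # v_proj    -> 114688
    + hidden * hidden    # o_proj    -> 802816
    + inter * hidden     # gate_proj -> 4358144
    + inter * hidden     # up_proj   -> 4358144
    + hidden * inter     # down_proj -> 4358144
)
connector = hidden * vision_dim + hidden * hidden  # 1032192 + 802816

total = n_layers * per_layer + connector
assert total == 359_661_568  # matches "Total Trainable Parameters" in the log
```

The assert reproduces 359661568, i.e. the score tensors alone account for roughly 28% of the 1283756736 total parameters reported.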
+ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
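For reading the NCCL lines: each "Trees" entry has the form children->rank->parent per channel, with -1 meaning none, so "3/-1/-1->2->1" places rank 2 between parent 1 and child 3 in a chain over the 8 local GPUs, and each "Channel k/24 : 0 1 2 3 4 5 6 7" line gives the ring order for one of the 24 channels. A throwaway decoder for these entries (a hypothetical helper that only parses the log text, not part of NCCL):

```python
import re

# Decode NCCL "Trees" entries of the form "child/child/child->rank->parent"
# per channel, where -1 means "no child / no parent".
entry = "[0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1"
pattern = r"\[(\d+)\] (-?\d+(?:/-?\d+)*)->(\d+)->(-?\d+)"
for channel, children, rank, parent in re.findall(pattern, entry):
    kids = [c for c in children.split("/") if c != "-1"]
    print(f"channel {channel}: rank {rank}, parent {parent}, children {kids}")
# -> channel 0: rank 2, parent 1, children ['3']   (a chain 1 -> 2 -> 3)
```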
+ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 
+ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:684530:691241 [0] NCCL INFO ncclCommInitRank comm 0x7eff8006b3b0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x63bd408aad71fa41 - Init COMPLETE +ywang29-vrdb-test2-worker-0:684536:691243 [6] NCCL INFO ncclCommInitRank comm 0x7fedb006af30 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x63bd408aad71fa41 - Init COMPLETE +ywang29-vrdb-test2-worker-0:684532:691246 [2] NCCL INFO ncclCommInitRank comm 0x7f9e2c06a410 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x63bd408aad71fa41 - Init COMPLETE +ywang29-vrdb-test2-worker-0:684533:691245 [3] NCCL INFO ncclCommInitRank comm 0x7f41a006a700 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x63bd408aad71fa41 - Init COMPLETE +ywang29-vrdb-test2-worker-0:684537:691244 [7] NCCL INFO ncclCommInitRank comm 0x7f5098069db0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x63bd408aad71fa41 - Init COMPLETE +ywang29-vrdb-test2-worker-0:684534:691242 [4] NCCL INFO ncclCommInitRank comm 0x7f08cc06acf0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x63bd408aad71fa41 - Init COMPLETE +ywang29-vrdb-test2-worker-0:684535:691248 [5] NCCL INFO ncclCommInitRank comm 0x7fca3c06a980 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x63bd408aad71fa41 - Init COMPLETE +ywang29-vrdb-test2-worker-0:684531:691247 [1] NCCL INFO ncclCommInitRank comm 0x7fe4e006aa40 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x63bd408aad71fa41 - Init COMPLETE + 0%| | 1/520 [00:14<2:03:06, 14.23s/it] {'loss': 5.1723, 'grad_norm': 0.5333515819090461, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:03:06, 14.23s/it] 0%| | 2/520 [00:17<1:09:07, 8.01s/it] {'loss': 4.6857, 'grad_norm': 0.47741865429596275, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:17<1:09:07, 8.01s/it] 1%| | 3/520 [00:21<51:48, 6.01s/it] {'loss': 2.3508, 'grad_norm': 0.09201028730626876, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:21<51:48, 6.01s/it] 1%| | 4/520 [00:25<44:11, 5.14s/it] {'loss': 2.3624, 'grad_norm': 0.10005640735120858, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<44:11, 5.14s/it] 1%| | 5/520 [00:29<40:12, 4.68s/it] {'loss': 2.184, 'grad_norm': 0.0751181516573952, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:29<40:12, 4.68s/it] 1%| | 6/520 [00:33<37:38, 4.39s/it] {'loss': 1.9685, 'grad_norm': 0.04129104519454663, 'learning_rate': 0.07500000000000001, 'epoch': 0.01} + 1%| | 6/520 [00:33<37:38, 4.39s/it] 1%|▏ | 7/520 [00:36<36:04, 4.22s/it] {'loss': 1.7499, 'grad_norm': 0.027374323565506665, 'learning_rate': 0.08750000000000001, 'epoch': 0.01} + 1%|▏ | 7/520 [00:36<36:04, 4.22s/it] 2%|▏ | 8/520 [00:41<36:42, 4.30s/it] {'loss': 1.9568, 
'grad_norm': 0.0841208815849586, 'learning_rate': 0.1, 'epoch': 0.02} + 2%|▏ | 8/520 [00:41<36:42, 4.30s/it] 2%|▏ | 9/520 [00:45<36:41, 4.31s/it] {'loss': 2.3727, 'grad_norm': 0.112851448208696, 'learning_rate': 0.1125, 'epoch': 0.02} + 2%|▏ | 9/520 [00:45<36:41, 4.31s/it] 2%|▏ | 10/520 [00:49<35:14, 4.15s/it] {'loss': 1.902, 'grad_norm': 0.05616735478386505, 'learning_rate': 0.125, 'epoch': 0.02} + 2%|▏ | 10/520 [00:49<35:14, 4.15s/it] 2%|▏ | 11/520 [00:53<34:19, 4.05s/it] {'loss': 2.0752, 'grad_norm': 0.04833785564898408, 'learning_rate': 0.1375, 'epoch': 0.02} + 2%|▏ | 11/520 [00:53<34:19, 4.05s/it] 2%|▏ | 12/520 [00:56<33:11, 3.92s/it] {'loss': 2.0831, 'grad_norm': 0.045096285737645805, 'learning_rate': 0.15000000000000002, 'epoch': 0.02} + 2%|▏ | 12/520 [00:56<33:11, 3.92s/it][2025-10-10 08:09:09,607] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:01<34:13, 4.05s/it] {'loss': 1.8267, 'grad_norm': 0.019220780300088103, 'learning_rate': 0.1625, 'epoch': 0.03} + 2%|▎ | 13/520 [01:01<34:13, 4.05s/it] 3%|▎ | 14/520 [01:04<33:08, 3.93s/it] {'loss': 1.9144, 'grad_norm': 0.03143358160022317, 'learning_rate': 0.17500000000000002, 'epoch': 0.03} + 3%|▎ | 14/520 [01:04<33:08, 3.93s/it] 3%|▎ | 15/520 [01:08<32:21, 3.84s/it] {'loss': 2.0592, 'grad_norm': 0.03704945329366272, 'learning_rate': 0.1875, 'epoch': 0.03} + 3%|▎ | 15/520 [01:08<32:21, 3.84s/it] 3%|▎ | 16/520 [01:12<31:44, 3.78s/it] {'loss': 1.9657, 'grad_norm': 0.022008576837312558, 'learning_rate': 0.2, 'epoch': 0.03} + 3%|▎ | 16/520 [01:12<31:44, 3.78s/it] 3%|▎ | 17/520 [01:15<31:17, 3.73s/it] {'loss': 1.9471, 'grad_norm': 0.014983003619434762, 'learning_rate': 0.1999980572931538, 'epoch': 0.03} + 3%|▎ | 17/520 [01:15<31:17, 3.73s/it] 3%|▎ | 18/520 [01:19<31:01, 3.71s/it] {'loss': 1.7569, 'grad_norm': 0.01778719156679583, 'learning_rate': 0.19999222924809748, 'epoch': 0.03} + 3%|▎ | 18/520 [01:19<31:01, 3.71s/it] 4%|▎ | 19/520 [01:23<30:49, 3.69s/it] {'loss': 2.0865, 'grad_norm': 0.020600061365125263, 'learning_rate': 0.19998251609127465, 'epoch': 0.04} + 4%|▎ | 19/520 [01:23<30:49, 3.69s/it] 4%|▍ | 20/520 [01:26<30:38, 3.68s/it] {'loss': 1.7682, 'grad_norm': 0.016273971341457385, 'learning_rate': 0.19996891820008164, 'epoch': 0.04} + 4%|▍ | 20/520 [01:26<30:38, 3.68s/it] 4%|▍ | 21/520 [01:30<30:36, 3.68s/it] {'loss': 2.1814, 'grad_norm': 0.02623351062555112, 'learning_rate': 0.19995143610285276, 'epoch': 0.04} + 4%|▍ | 21/520 [01:30<30:36, 3.68s/it] 4%|▍ | 22/520 [01:34<30:28, 3.67s/it] {'loss': 1.9458, 'grad_norm': 0.016493346289153367, 'learning_rate': 0.19993007047883987, 'epoch': 0.04} + 4%|▍ | 22/520 [01:34<30:28, 3.67s/it] 4%|▍ | 23/520 [01:37<30:23, 3.67s/it] {'loss': 1.8646, 'grad_norm': 0.019831374955735535, 'learning_rate': 0.1999048221581858, 'epoch': 0.04} + 4%|▍ | 23/520 [01:37<30:23, 3.67s/it] 5%|▍ | 24/520 [01:41<30:14, 3.66s/it] {'loss': 2.1577, 'grad_norm': 0.030390161465050414, 'learning_rate': 0.19987569212189224, 'epoch': 0.05} + 5%|▍ | 24/520 [01:41<30:14, 3.66s/it] 5%|▍ | 25/520 [01:45<30:14, 3.67s/it] {'loss': 1.8786, 'grad_norm': 0.01878679125934144, 'learning_rate': 0.19984268150178167, 'epoch': 0.05} 
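The learning_rate column follows a linear-warmup-plus-cosine shape: warmup_ratio 0.03 of 520 steps gives 16 warmup steps (hence the 0.0125-per-step ramp that peaks at the 2e-1 learning rate on step 16), followed by half-cycle cosine decay over the remaining 504 steps. A sketch that reproduces the logged values, assuming the schedule is the usual get_cosine_schedule_with_warmup formula from transformers:

```python
import math

# Linear warmup then half-cycle cosine decay, matching the learning_rate
# values in the log: 520 total steps, warmup_ratio 0.03 -> 16 warmup steps.
PEAK_LR, TOTAL_STEPS = 2e-1, 520
WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)  # 16

def lr_at(step: int) -> float:
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))   # 0.0125              (step 1 in the log)
print(lr_at(16))  # 0.2                 (warmup peak, step 16)
print(lr_at(17))  # 0.19999805729...    (first cosine step, as logged)
```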
+ 5%|▍ | 25/520 [01:45<30:14, 3.67s/it] 5%|▌ | 26/520 [01:48<30:31, 3.71s/it] {'loss': 1.9041, 'grad_norm': 0.020328996478475223, 'learning_rate': 0.1998057915804532, 'epoch': 0.05} + 5%|▌ | 26/520 [01:48<30:31, 3.71s/it] 5%|▌ | 27/520 [01:52<30:40, 3.73s/it] {'loss': 1.7488, 'grad_norm': 0.01530829809417331, 'learning_rate': 0.1997650237912329, 'epoch': 0.05} + 5%|▌ | 27/520 [01:52<30:40, 3.73s/it] 5%|▌ | 28/520 [01:56<30:33, 3.73s/it] {'loss': 1.7145, 'grad_norm': 0.009313760252219304, 'learning_rate': 0.199720379718118, 'epoch': 0.05} + 5%|▌ | 28/520 [01:56<30:33, 3.73s/it] 6%|▌ | 29/520 [02:00<30:22, 3.71s/it] {'loss': 1.7328, 'grad_norm': 0.011177022752547611, 'learning_rate': 0.19967186109571552, 'epoch': 0.06} + 6%|▌ | 29/520 [02:00<30:22, 3.71s/it] 6%|▌ | 30/520 [02:03<30:12, 3.70s/it] {'loss': 2.6435, 'grad_norm': 0.057154675101550256, 'learning_rate': 0.19961946980917455, 'epoch': 0.06} + 6%|▌ | 30/520 [02:03<30:12, 3.70s/it] 6%|▌ | 31/520 [02:07<29:57, 3.68s/it] {'loss': 1.7483, 'grad_norm': 0.012349203113686624, 'learning_rate': 0.1995632078941134, 'epoch': 0.06} + 6%|▌ | 31/520 [02:07<29:57, 3.68s/it] 6%|▌ | 32/520 [02:10<29:46, 3.66s/it] {'loss': 2.6968, 'grad_norm': 0.03216005980197707, 'learning_rate': 0.19950307753654017, 'epoch': 0.06} + 6%|▌ | 32/520 [02:10<29:46, 3.66s/it] 6%|▋ | 33/520 [02:14<29:40, 3.66s/it] {'loss': 2.6672, 'grad_norm': 0.2163437435689256, 'learning_rate': 0.19943908107276798, 'epoch': 0.06} + 6%|▋ | 33/520 [02:14<29:40, 3.66s/it] 7%|▋ | 34/520 [02:18<29:36, 3.66s/it] {'loss': 2.2188, 'grad_norm': 0.09582572118521478, 'learning_rate': 0.19937122098932428, 'epoch': 0.07} + 7%|▋ | 34/520 [02:18<29:36, 3.66s/it] 7%|▋ | 35/520 [02:21<29:36, 3.66s/it] {'loss': 3.2524, 'grad_norm': 0.19809569370306623, 'learning_rate': 0.19929949992285395, 'epoch': 0.07} + 7%|▋ | 35/520 [02:21<29:36, 3.66s/it] 7%|▋ | 36/520 [02:25<29:27, 3.65s/it] {'loss': 2.1909, 'grad_norm': 0.039821628255837016, 'learning_rate': 0.19922392066001723, 'epoch': 0.07} + 7%|▋ | 36/520 [02:25<29:27, 3.65s/it] 7%|▋ | 37/520 [02:29<29:24, 3.65s/it] {'loss': 2.7587, 'grad_norm': 0.03279852329982306, 'learning_rate': 0.19914448613738106, 'epoch': 0.07} + 7%|▋ | 37/520 [02:29<29:24, 3.65s/it] 7%|▋ | 38/520 [02:32<29:16, 3.64s/it] {'loss': 2.1835, 'grad_norm': 0.02713516740391716, 'learning_rate': 0.1990611994413053, 'epoch': 0.07} + 7%|▋ | 38/520 [02:32<29:16, 3.64s/it] 8%|▊ | 39/520 [02:36<29:11, 3.64s/it] {'loss': 2.0095, 'grad_norm': 0.10159672180676575, 'learning_rate': 0.19897406380782262, 'epoch': 0.07} + 8%|▊ | 39/520 [02:36<29:11, 3.64s/it] 8%|▊ | 40/520 [02:40<29:09, 3.65s/it] {'loss': 1.9607, 'grad_norm': 0.01378879034399883, 'learning_rate': 0.19888308262251286, 'epoch': 0.08} + 8%|▊ | 40/520 [02:40<29:09, 3.65s/it] 8%|▊ | 41/520 [02:43<29:06, 3.65s/it] {'loss': 1.9005, 'grad_norm': 0.012347732894584561, 'learning_rate': 0.19878825942037148, 'epoch': 0.08} + 8%|▊ | 41/520 [02:43<29:06, 3.65s/it] 8%|▊ | 42/520 [02:47<29:01, 3.64s/it] {'loss': 1.9773, 'grad_norm': 0.014451722614495389, 'learning_rate': 0.19868959788567211, 'epoch': 0.08} + 8%|▊ | 42/520 [02:47<29:01, 3.64s/it] 8%|▊ | 43/520 [02:51<29:02, 3.65s/it] {'loss': 2.3986, 'grad_norm': 0.022073607671382098, 'learning_rate': 0.1985871018518236, 'epoch': 0.08} + 8%|▊ | 43/520 [02:51<29:02, 3.65s/it] 8%|▊ | 44/520 [02:54<28:59, 3.65s/it] {'loss': 2.5724, 'grad_norm': 0.03327268944648284, 'learning_rate': 0.19848077530122082, 'epoch': 0.08} + 8%|▊ | 44/520 [02:54<28:59, 3.65s/it] 9%|▊ | 45/520 [02:58<28:56, 3.66s/it] {'loss': 1.9595, 
'grad_norm': 0.017984824861634625, 'learning_rate': 0.19837062236509015, 'epoch': 0.09} + 9%|▊ | 45/520 [02:58<28:56, 3.66s/it] 9%|▉ | 46/520 [03:02<28:52, 3.66s/it] {'loss': 2.543, 'grad_norm': 0.01813473565109794, 'learning_rate': 0.19825664732332884, 'epoch': 0.09} + 9%|▉ | 46/520 [03:02<28:52, 3.66s/it] 9%|▉ | 47/520 [03:05<28:48, 3.65s/it] {'loss': 1.9609, 'grad_norm': 0.02006360999444117, 'learning_rate': 0.19813885460433878, 'epoch': 0.09} + 9%|▉ | 47/520 [03:05<28:48, 3.65s/it] 9%|▉ | 48/520 [03:09<28:44, 3.65s/it] {'loss': 2.0135, 'grad_norm': 0.04732102722719432, 'learning_rate': 0.19801724878485438, 'epoch': 0.09} + 9%|▉ | 48/520 [03:09<28:44, 3.65s/it] 9%|▉ | 49/520 [03:13<28:34, 3.64s/it] {'loss': 2.2275, 'grad_norm': 0.0523476591211021, 'learning_rate': 0.19789183458976486, 'epoch': 0.09} + 9%|▉ | 49/520 [03:13<28:34, 3.64s/it] 10%|▉ | 50/520 [03:16<28:28, 3.63s/it] {'loss': 2.6854, 'grad_norm': 0.1340294108414898, 'learning_rate': 0.19776261689193048, 'epoch': 0.1} + 10%|▉ | 50/520 [03:16<28:28, 3.63s/it] 10%|▉ | 51/520 [03:20<28:25, 3.64s/it] {'loss': 2.6422, 'grad_norm': 0.08105899885351116, 'learning_rate': 0.19762960071199334, 'epoch': 0.1} + 10%|▉ | 51/520 [03:20<28:25, 3.64s/it] 10%|█ | 52/520 [03:23<28:25, 3.64s/it] {'loss': 2.6047, 'grad_norm': 0.06082986106886785, 'learning_rate': 0.19749279121818236, 'epoch': 0.1} + 10%|█ | 52/520 [03:23<28:25, 3.64s/it] 10%|█ | 53/520 [03:27<28:29, 3.66s/it] {'loss': 2.8188, 'grad_norm': 0.07393325700903255, 'learning_rate': 0.19735219372611235, 'epoch': 0.1} + 10%|█ | 53/520 [03:27<28:29, 3.66s/it] 10%|█ | 54/520 [03:31<28:22, 3.65s/it] {'loss': 2.4739, 'grad_norm': 0.04299630487632572, 'learning_rate': 0.19720781369857746, 'epoch': 0.1} + 10%|█ | 54/520 [03:31<28:22, 3.65s/it] 11%|█ | 55/520 [03:34<28:18, 3.65s/it] {'loss': 2.2812, 'grad_norm': 0.027010823165400287, 'learning_rate': 0.1970596567453391, 'epoch': 0.11} + 11%|█ | 55/520 [03:34<28:18, 3.65s/it] 11%|█ | 56/520 [03:38<28:12, 3.65s/it] {'loss': 2.2867, 'grad_norm': 0.01568349908446551, 'learning_rate': 0.1969077286229078, 'epoch': 0.11} + 11%|█ | 56/520 [03:38<28:12, 3.65s/it] 11%|█ | 57/520 [03:42<28:06, 3.64s/it] {'loss': 2.1048, 'grad_norm': 0.013361790789198967, 'learning_rate': 0.19675203523431964, 'epoch': 0.11} + 11%|█ | 57/520 [03:42<28:06, 3.64s/it] 11%|█ | 58/520 [03:45<28:04, 3.65s/it] {'loss': 2.2213, 'grad_norm': 0.012711036729711985, 'learning_rate': 0.19659258262890683, 'epoch': 0.11} + 11%|█ | 58/520 [03:45<28:04, 3.65s/it] 11%|█▏ | 59/520 [03:49<28:02, 3.65s/it] {'loss': 2.7367, 'grad_norm': 0.0855713328541855, 'learning_rate': 0.19642937700206278, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:49<28:02, 3.65s/it] 12%|█▏ | 60/520 [03:53<27:57, 3.65s/it] {'loss': 7.4371, 'grad_norm': 0.9997257163441305, 'learning_rate': 0.19626242469500121, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:53<27:57, 3.65s/it] 12%|█▏ | 61/520 [03:56<27:52, 3.64s/it] {'loss': 3.5419, 'grad_norm': 0.0604191643102295, 'learning_rate': 0.19609173219450998, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:56<27:52, 3.64s/it] 12%|█▏ | 62/520 [04:00<27:48, 3.64s/it] {'loss': 2.4559, 'grad_norm': 0.03958645983424731, 'learning_rate': 0.19591730613269878, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:00<27:48, 3.64s/it] 12%|█▏ | 63/520 [04:04<27:44, 3.64s/it] {'loss': 2.3659, 'grad_norm': 0.038737681704453734, 'learning_rate': 0.19573915328674182, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:04<27:44, 3.64s/it] 12%|█▏ | 64/520 [04:07<27:47, 3.66s/it] {'loss': 2.2647, 'grad_norm': 0.029285641603166972, 'learning_rate': 
0.1955572805786141, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:07<27:47, 3.66s/it] 12%|█▎ | 65/520 [04:11<27:46, 3.66s/it] {'loss': 2.1751, 'grad_norm': 0.016379057317077766, 'learning_rate': 0.1953716950748227, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:11<27:46, 3.66s/it] 13%|█▎ | 66/520 [04:15<27:39, 3.66s/it] {'loss': 2.1632, 'grad_norm': 0.015551237446253054, 'learning_rate': 0.19518240398613226, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:15<27:39, 3.66s/it] 13%|█▎ | 67/520 [04:18<27:36, 3.66s/it] {'loss': 1.901, 'grad_norm': 0.009025227966392057, 'learning_rate': 0.1949894146672846, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:18<27:36, 3.66s/it] 13%|█▎ | 68/520 [04:22<27:37, 3.67s/it] {'loss': 1.9277, 'grad_norm': 0.015381051110279407, 'learning_rate': 0.1947927346167132, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:22<27:37, 3.67s/it] 13%|█▎ | 69/520 [04:26<27:54, 3.71s/it] {'loss': 1.9096, 'grad_norm': 0.011059199149182616, 'learning_rate': 0.1945923714762516, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:26<27:54, 3.71s/it] 13%|█▎ | 70/520 [04:30<28:14, 3.77s/it] {'loss': 2.0355, 'grad_norm': 0.01759534313319735, 'learning_rate': 0.19438833303083677, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:30<28:14, 3.77s/it] 14%|█▎ | 71/520 [04:34<28:34, 3.82s/it] {'loss': 1.8614, 'grad_norm': 0.014445949266813921, 'learning_rate': 0.19418062720820636, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:34<28:34, 3.82s/it] 14%|█▍ | 72/520 [04:37<28:40, 3.84s/it] {'loss': 2.0133, 'grad_norm': 0.017253376628405596, 'learning_rate': 0.19396926207859086, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:37<28:40, 3.84s/it] 14%|█▍ | 73/520 [04:41<28:34, 3.83s/it] {'loss': 1.8003, 'grad_norm': 0.009816990596710306, 'learning_rate': 0.19375424585439993, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:41<28:34, 3.83s/it] 14%|█▍ | 74/520 [04:45<28:06, 3.78s/it] {'loss': 1.9236, 'grad_norm': 0.008597546227535236, 'learning_rate': 0.1935355868899034, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:45<28:06, 3.78s/it] 14%|█▍ | 75/520 [04:49<28:04, 3.79s/it] {'loss': 1.787, 'grad_norm': 0.007148320959859571, 'learning_rate': 0.19331329368090666, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:49<28:04, 3.79s/it] 15%|█▍ | 76/520 [04:53<28:13, 3.82s/it] {'loss': 2.9038, 'grad_norm': 0.0387112221445136, 'learning_rate': 0.19308737486442043, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:53<28:13, 3.82s/it] 15%|█▍ | 77/520 [04:56<28:19, 3.84s/it] {'loss': 1.7146, 'grad_norm': 0.00935057646122952, 'learning_rate': 0.19285783921832536, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:56<28:19, 3.84s/it] 15%|█▌ | 78/520 [05:00<28:21, 3.85s/it] {'loss': 1.8563, 'grad_norm': 0.00750500216303848, 'learning_rate': 0.19262469566103088, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:00<28:21, 3.85s/it] 15%|█▌ | 79/520 [05:04<28:20, 3.86s/it] {'loss': 1.8408, 'grad_norm': 0.009062095158494408, 'learning_rate': 0.19238795325112867, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:04<28:20, 3.86s/it] 15%|█▌ | 80/520 [05:08<28:24, 3.87s/it] {'loss': 3.2144, 'grad_norm': 0.034740623878895996, 'learning_rate': 0.19214762118704076, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:08<28:24, 3.87s/it] 16%|█▌ | 81/520 [05:12<28:25, 3.89s/it] {'loss': 2.0861, 'grad_norm': 0.01846133573508254, 'learning_rate': 0.19190370880666208, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:12<28:25, 3.89s/it] 16%|█▌ | 82/520 [05:16<28:21, 3.88s/it] {'loss': 1.938, 'grad_norm': 0.0072765545029191405, 'learning_rate': 0.19165622558699763, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:16<28:21, 3.88s/it] 16%|█▌ | 83/520 [05:20<28:17, 3.88s/it] {'loss': 1.9804, 'grad_norm': 0.012991779976098218, 'learning_rate': 
0.19140518114379435, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:20<28:17, 3.88s/it] 16%|█▌ | 84/520 [05:24<28:09, 3.87s/it] {'loss': 1.9378, 'grad_norm': 0.008855786775707385, 'learning_rate': 0.19115058523116735, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:24<28:09, 3.87s/it] 16%|█▋ | 85/520 [05:28<28:06, 3.88s/it] {'loss': 1.9006, 'grad_norm': 0.007041110830011702, 'learning_rate': 0.1908924477412211, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:28<28:06, 3.88s/it] 17%|█▋ | 86/520 [05:31<28:03, 3.88s/it] {'loss': 2.0412, 'grad_norm': 0.009677886052113564, 'learning_rate': 0.19063077870366502, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:31<28:03, 3.88s/it] 17%|█▋ | 87/520 [05:35<28:07, 3.90s/it] {'loss': 2.8271, 'grad_norm': 0.022368439191332013, 'learning_rate': 0.1903655882854237, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:35<28:07, 3.90s/it] 17%|█▋ | 88/520 [05:39<27:37, 3.84s/it] {'loss': 3.8424, 'grad_norm': 0.08061579377318316, 'learning_rate': 0.19009688679024192, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:39<27:37, 3.84s/it] 17%|█▋ | 89/520 [05:43<27:10, 3.78s/it] {'loss': 1.9857, 'grad_norm': 0.03541742572370072, 'learning_rate': 0.18982468465828442, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:43<27:10, 3.78s/it] 17%|█▋ | 90/520 [05:46<26:52, 3.75s/it] {'loss': 1.843, 'grad_norm': 0.010406083548001736, 'learning_rate': 0.1895489924657301, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:46<26:52, 3.75s/it] 18%|█▊ | 91/520 [05:50<26:34, 3.72s/it] {'loss': 1.9472, 'grad_norm': 0.009348949626149195, 'learning_rate': 0.18926982092436118, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:50<26:34, 3.72s/it] 18%|█▊ | 92/520 [05:54<26:21, 3.69s/it] {'loss': 1.8503, 'grad_norm': 0.013053267190983884, 'learning_rate': 0.18898718088114688, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:54<26:21, 3.69s/it] 18%|█▊ | 93/520 [05:57<26:15, 3.69s/it] {'loss': 1.814, 'grad_norm': 0.01830680434499087, 'learning_rate': 0.18870108331782218, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:57<26:15, 3.69s/it] 18%|█▊ | 94/520 [06:01<26:06, 3.68s/it] {'loss': 1.9717, 'grad_norm': 0.013525697089815766, 'learning_rate': 0.18841153935046098, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:01<26:06, 3.68s/it] 18%|█▊ | 95/520 [06:05<25:55, 3.66s/it] {'loss': 1.7881, 'grad_norm': 0.01065942506569022, 'learning_rate': 0.18811856022904425, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:05<25:55, 3.66s/it] 18%|█▊ | 96/520 [06:08<25:47, 3.65s/it] {'loss': 1.7859, 'grad_norm': 0.006591307023617786, 'learning_rate': 0.18782215733702287, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:08<25:47, 3.65s/it] 19%|█▊ | 97/520 [06:12<25:44, 3.65s/it] {'loss': 1.7365, 'grad_norm': 0.007360312105895724, 'learning_rate': 0.18752234219087538, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:12<25:44, 3.65s/it] 19%|█▉ | 98/520 [06:16<25:40, 3.65s/it] {'loss': 1.7479, 'grad_norm': 0.009438028970788161, 'learning_rate': 0.18721912643966054, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:16<25:40, 3.65s/it] 19%|█▉ | 99/520 [06:19<25:39, 3.66s/it] {'loss': 1.7794, 'grad_norm': 0.011060040103507226, 'learning_rate': 0.18691252186456464, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:19<25:39, 3.66s/it] 19%|█▉ | 100/520 [06:23<25:35, 3.66s/it] {'loss': 2.3988, 'grad_norm': 0.01955626481936192, 'learning_rate': 0.1866025403784439, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:23<25:35, 3.66s/it] 19%|█▉ | 101/520 [06:27<25:52, 3.70s/it] {'loss': 1.7477, 'grad_norm': 0.008253466530369891, 'learning_rate': 0.18628919402536132, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:27<25:52, 3.70s/it] 20%|█▉ | 102/520 [06:30<25:42, 3.69s/it] {'loss': 1.7438, 'grad_norm': 0.008703498328281094, 'learning_rate': 
0.18597249498011903, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:30<25:42, 3.69s/it] 20%|█▉ | 103/520 [06:34<25:34, 3.68s/it] {'loss': 1.7184, 'grad_norm': 0.005594696540461157, 'learning_rate': 0.18565245554778517, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:34<25:34, 3.68s/it] 20%|██ | 104/520 [06:38<25:25, 3.67s/it] {'loss': 1.7666, 'grad_norm': 0.006466236494025691, 'learning_rate': 0.18532908816321558, 'epoch': 0.2} + 20%|██ | 104/520 [06:38<25:25, 3.67s/it] 20%|██ | 105/520 [06:41<25:23, 3.67s/it] {'loss': 1.7529, 'grad_norm': 0.006838949051918998, 'learning_rate': 0.18500240539057092, 'epoch': 0.2} + 20%|██ | 105/520 [06:41<25:23, 3.67s/it] 20%|██ | 106/520 [06:45<25:18, 3.67s/it] {'loss': 2.3062, 'grad_norm': 0.018402326623915884, 'learning_rate': 0.18467241992282843, 'epoch': 0.2} + 20%|██ | 106/520 [06:45<25:18, 3.67s/it] 21%|██ | 107/520 [06:49<25:14, 3.67s/it] {'loss': 2.2435, 'grad_norm': 0.009787374874875299, 'learning_rate': 0.18433914458128858, 'epoch': 0.21} + 21%|██ | 107/520 [06:49<25:14, 3.67s/it] 21%|██ | 108/520 [06:52<25:11, 3.67s/it] {'loss': 1.6904, 'grad_norm': 0.009294998182315434, 'learning_rate': 0.18400259231507718, 'epoch': 0.21} + 21%|██ | 108/520 [06:52<25:11, 3.67s/it] 21%|██ | 109/520 [06:56<25:12, 3.68s/it] {'loss': 2.2173, 'grad_norm': 0.015970867668292577, 'learning_rate': 0.18366277620064198, 'epoch': 0.21} + 21%|██ | 109/520 [06:56<25:12, 3.68s/it] 21%|██ | 110/520 [07:00<25:07, 3.68s/it] {'loss': 1.8936, 'grad_norm': 0.01234172572497577, 'learning_rate': 0.1833197094412449, 'epoch': 0.21} + 21%|██ | 110/520 [07:00<25:07, 3.68s/it] 21%|██▏ | 111/520 [07:03<25:03, 3.68s/it] {'loss': 1.8921, 'grad_norm': 0.009853895937773412, 'learning_rate': 0.18297340536644877, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:03<25:03, 3.68s/it] 22%|██▏ | 112/520 [07:07<25:00, 3.68s/it] {'loss': 1.7633, 'grad_norm': 0.008349006321697645, 'learning_rate': 0.1826238774315995, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:07<25:00, 3.68s/it] 22%|██▏ | 113/520 [07:11<24:56, 3.68s/it] {'loss': 1.5975, 'grad_norm': 0.004779656736905775, 'learning_rate': 0.18227113921730334, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:11<24:56, 3.68s/it] 22%|██▏ | 114/520 [07:14<24:54, 3.68s/it] {'loss': 1.7029, 'grad_norm': 0.005987076534857873, 'learning_rate': 0.1819152044288992, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:14<24:54, 3.68s/it] 22%|██▏ | 115/520 [07:18<24:52, 3.69s/it] {'loss': 1.8678, 'grad_norm': 0.006425913767753594, 'learning_rate': 0.18155608689592603, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:18<24:52, 3.69s/it] 22%|██▏ | 116/520 [07:22<24:47, 3.68s/it] {'loss': 1.814, 'grad_norm': 0.00489548140935285, 'learning_rate': 0.18119380057158568, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:22<24:47, 3.68s/it] 22%|██▎ | 117/520 [07:25<24:40, 3.67s/it] {'loss': 1.8025, 'grad_norm': 0.00841854087960722, 'learning_rate': 0.18082835953220056, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:25<24:40, 3.67s/it] 23%|██▎ | 118/520 [07:29<24:38, 3.68s/it] {'loss': 1.6494, 'grad_norm': 0.00643697888156458, 'learning_rate': 0.18045977797666685, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:29<24:38, 3.68s/it] 23%|██▎ | 119/520 [07:33<24:35, 3.68s/it] {'loss': 1.5866, 'grad_norm': 0.006947044312248428, 'learning_rate': 0.1800880702259028, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:33<24:35, 3.68s/it] 23%|██▎ | 120/520 [07:37<24:28, 3.67s/it] {'loss': 1.61, 'grad_norm': 0.007177123678770517, 'learning_rate': 0.17971325072229227, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:37<24:28, 3.67s/it] 23%|██▎ | 121/520 [07:40<24:25, 3.67s/it] {'loss': 1.6845, 
'grad_norm': 0.005331350291928866, 'learning_rate': 0.17933533402912352, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:40<24:25, 3.67s/it] 23%|██▎ | 122/520 [07:44<24:21, 3.67s/it] {'loss': 1.5499, 'grad_norm': 0.004904284156899724, 'learning_rate': 0.17895433483002354, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:44<24:21, 3.67s/it] 24%|██▎ | 123/520 [07:48<24:20, 3.68s/it] {'loss': 2.2185, 'grad_norm': 0.010029664299930513, 'learning_rate': 0.17857026792838737, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:48<24:20, 3.68s/it] 24%|██▍ | 124/520 [07:51<24:16, 3.68s/it] {'loss': 1.6677, 'grad_norm': 0.007091797849845115, 'learning_rate': 0.178183148246803, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:51<24:16, 3.68s/it] 24%|██▍ | 125/520 [07:55<24:12, 3.68s/it] {'loss': 1.624, 'grad_norm': 0.006179427863029499, 'learning_rate': 0.1777929908264715, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:55<24:12, 3.68s/it] 24%|██▍ | 126/520 [07:59<25:29, 3.88s/it] {'loss': 1.9749, 'grad_norm': 0.006848663671564073, 'learning_rate': 0.17739981082662276, 'epoch': 0.24} + 24%|██▍ | 126/520 [07:59<25:29, 3.88s/it] 24%|██▍ | 127/520 [08:03<25:01, 3.82s/it] {'loss': 1.6051, 'grad_norm': 0.009218459557194532, 'learning_rate': 0.1770036235239263, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:03<25:01, 3.82s/it] 25%|██▍ | 128/520 [08:07<24:50, 3.80s/it] {'loss': 1.6836, 'grad_norm': 0.007782970811969626, 'learning_rate': 0.1766044443118978, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:07<24:50, 3.80s/it] 25%|██▍ | 129/520 [08:11<24:48, 3.81s/it] {'loss': 1.5734, 'grad_norm': 0.004837086784255685, 'learning_rate': 0.17620228870030108, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:11<24:48, 3.81s/it] 25%|██▌ | 130/520 [08:14<24:49, 3.82s/it] {'loss': 1.675, 'grad_norm': 0.008545913027408028, 'learning_rate': 0.1757971723145453, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:14<24:49, 3.82s/it] 25%|██▌ | 131/520 [08:18<24:48, 3.83s/it] {'loss': 1.9558, 'grad_norm': 0.010275078254668439, 'learning_rate': 0.175389110895078, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:18<24:48, 3.83s/it] 25%|██▌ | 132/520 [08:22<24:44, 3.83s/it] {'loss': 1.6995, 'grad_norm': 0.005826070729219464, 'learning_rate': 0.17497812029677343, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:22<24:44, 3.83s/it] 26%|██▌ | 133/520 [08:26<24:41, 3.83s/it] {'loss': 1.5715, 'grad_norm': 0.006885877063408852, 'learning_rate': 0.17456421648831655, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:26<24:41, 3.83s/it] 26%|██▌ | 134/520 [08:30<24:43, 3.84s/it] {'loss': 1.6752, 'grad_norm': 0.007995531712971668, 'learning_rate': 0.17414741555158267, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:30<24:43, 3.84s/it] 26%|██▌ | 135/520 [08:34<24:37, 3.84s/it] {'loss': 1.7573, 'grad_norm': 0.005363804076191582, 'learning_rate': 0.1737277336810124, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:34<24:37, 3.84s/it] 26%|██▌ | 136/520 [08:37<24:34, 3.84s/it] {'loss': 1.6619, 'grad_norm': 0.007501526569014895, 'learning_rate': 0.17330518718298263, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:37<24:34, 3.84s/it] 26%|██▋ | 137/520 [08:41<24:29, 3.84s/it] {'loss': 1.5594, 'grad_norm': 0.007306398961707457, 'learning_rate': 0.17287979247517285, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:41<24:29, 3.84s/it] 27%|██▋ | 138/520 [08:45<24:23, 3.83s/it] {'loss': 1.5756, 'grad_norm': 0.005730106876617571, 'learning_rate': 0.17245156608592727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:45<24:23, 3.83s/it] 27%|██▋ | 139/520 [08:49<24:18, 3.83s/it] {'loss': 1.8521, 'grad_norm': 0.015501274123835233, 'learning_rate': 0.17202052465361267, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:49<24:18, 
3.83s/it] 27%|██▋ | 140/520 [08:53<24:14, 3.83s/it] {'loss': 1.9719, 'grad_norm': 0.006689111165619452, 'learning_rate': 0.17158668492597184, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:53<24:14, 3.83s/it] 27%|██▋ | 141/520 [08:57<24:10, 3.83s/it] {'loss': 1.7159, 'grad_norm': 0.007117622232865862, 'learning_rate': 0.17115006375947303, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:57<24:10, 3.83s/it] 27%|██▋ | 142/520 [09:00<24:05, 3.82s/it] {'loss': 2.0368, 'grad_norm': 0.009657153235667378, 'learning_rate': 0.17071067811865476, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:00<24:05, 3.82s/it] 28%|██▊ | 143/520 [09:04<23:59, 3.82s/it] {'loss': 1.6362, 'grad_norm': 0.006541450085786231, 'learning_rate': 0.17026854507546693, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:04<23:59, 3.82s/it] 28%|██▊ | 144/520 [09:08<23:55, 3.82s/it] {'loss': 1.5377, 'grad_norm': 0.00725269959187204, 'learning_rate': 0.1698236818086073, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:08<23:55, 3.82s/it] 28%|██▊ | 145/520 [09:12<23:53, 3.82s/it] {'loss': 1.4802, 'grad_norm': 0.004346433484159069, 'learning_rate': 0.16937610560285418, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:12<23:53, 3.82s/it] 28%|██▊ | 146/520 [09:16<23:51, 3.83s/it] {'loss': 2.0547, 'grad_norm': 0.01233995322297025, 'learning_rate': 0.1689258338483947, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:16<23:51, 3.83s/it] 28%|██▊ | 147/520 [09:19<23:46, 3.83s/it] {'loss': 1.5349, 'grad_norm': 0.004949333758322428, 'learning_rate': 0.16847288404014937, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:19<23:46, 3.83s/it] 28%|██▊ | 148/520 [09:23<23:43, 3.83s/it] {'loss': 1.5813, 'grad_norm': 0.006025982420461059, 'learning_rate': 0.16801727377709194, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:23<23:43, 3.83s/it] 29%|██▊ | 149/520 [09:27<23:43, 3.84s/it] {'loss': 1.5253, 'grad_norm': 0.006520609649480551, 'learning_rate': 0.16755902076156604, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:27<23:43, 3.84s/it] 29%|██▉ | 150/520 [09:31<23:38, 3.83s/it] {'loss': 1.7654, 'grad_norm': 0.007508608023121063, 'learning_rate': 0.16709814279859703, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:31<23:38, 3.83s/it] 29%|██▉ | 151/520 [09:35<23:32, 3.83s/it] {'loss': 1.5346, 'grad_norm': 0.005177170128256108, 'learning_rate': 0.1666346577952004, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:35<23:32, 3.83s/it] 29%|██▉ | 152/520 [09:39<23:26, 3.82s/it] {'loss': 1.5058, 'grad_norm': 0.006771977763199376, 'learning_rate': 0.16616858375968596, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:39<23:26, 3.82s/it] 29%|██▉ | 153/520 [09:42<23:21, 3.82s/it] {'loss': 1.5495, 'grad_norm': 0.006561560986142062, 'learning_rate': 0.16569993880095807, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:42<23:21, 3.82s/it] 30%|██▉ | 154/520 [09:46<23:23, 3.84s/it] {'loss': 1.6462, 'grad_norm': 0.006452144859790034, 'learning_rate': 0.16522874112781213, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:46<23:23, 3.84s/it] 30%|██▉ | 155/520 [09:50<23:20, 3.84s/it] {'loss': 1.5193, 'grad_norm': 0.0067855103392912945, 'learning_rate': 0.16475500904822704, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:50<23:20, 3.84s/it] 30%|███ | 156/520 [09:54<23:12, 3.82s/it] {'loss': 1.5823, 'grad_norm': 0.006815947609449467, 'learning_rate': 0.16427876096865393, 'epoch': 0.3} + 30%|███ | 156/520 [09:54<23:12, 3.82s/it] 30%|███ | 157/520 [09:58<23:07, 3.82s/it] {'loss': 2.0667, 'grad_norm': 0.011527577042670484, 'learning_rate': 0.16380001539330089, 'epoch': 0.3} + 30%|███ | 157/520 [09:58<23:07, 3.82s/it] 30%|███ | 158/520 [10:02<23:07, 3.83s/it] {'loss': 1.5439, 'grad_norm': 0.008539045623468043, 'learning_rate': 
0.163318790923414, 'epoch': 0.3} + 30%|███ | 158/520 [10:02<23:07, 3.83s/it] 31%|███ | 159/520 [10:05<22:57, 3.82s/it] {'loss': 1.5532, 'grad_norm': 0.0065313452840546305, 'learning_rate': 0.16283510625655473, 'epoch': 0.31} + 31%|███ | 159/520 [10:05<22:57, 3.82s/it] 31%|███ | 160/520 [10:09<22:55, 3.82s/it] {'loss': 1.6, 'grad_norm': 0.005195070047469594, 'learning_rate': 0.16234898018587338, 'epoch': 0.31} + 31%|███ | 160/520 [10:09<22:55, 3.82s/it] 31%|███ | 161/520 [10:13<22:50, 3.82s/it] {'loss': 1.5896, 'grad_norm': 0.005825564128678027, 'learning_rate': 0.16186043159937882, 'epoch': 0.31} + 31%|███ | 161/520 [10:13<22:50, 3.82s/it] 31%|███ | 162/520 [10:17<22:42, 3.81s/it] {'loss': 1.9042, 'grad_norm': 0.009798579393949824, 'learning_rate': 0.16136947947920477, 'epoch': 0.31} + 31%|███ | 162/520 [10:17<22:42, 3.81s/it] 31%|███▏ | 163/520 [10:21<22:35, 3.80s/it] {'loss': 1.4408, 'grad_norm': 0.0063368996943025694, 'learning_rate': 0.16087614290087207, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:21<22:35, 3.80s/it] 32%|███▏ | 164/520 [10:24<22:30, 3.79s/it] {'loss': 1.3994, 'grad_norm': 0.004983309256191675, 'learning_rate': 0.16038044103254775, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:24<22:30, 3.79s/it] 32%|███▏ | 165/520 [10:28<22:25, 3.79s/it] {'loss': 1.5339, 'grad_norm': 0.0068315294373937395, 'learning_rate': 0.15988239313430005, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:28<22:25, 3.79s/it] 32%|███▏ | 166/520 [10:32<22:22, 3.79s/it] {'loss': 1.5618, 'grad_norm': 0.006978283486536193, 'learning_rate': 0.15938201855735015, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:32<22:22, 3.79s/it] 32%|███▏ | 167/520 [10:36<22:17, 3.79s/it] {'loss': 1.5674, 'grad_norm': 0.00805287687278985, 'learning_rate': 0.15887933674332047, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:36<22:17, 3.79s/it] 32%|███▏ | 168/520 [10:40<22:16, 3.80s/it] {'loss': 1.4955, 'grad_norm': 0.004824475819036999, 'learning_rate': 0.158374367223479, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:40<22:16, 3.80s/it] 32%|███▎ | 169/520 [10:43<22:13, 3.80s/it] {'loss': 1.5439, 'grad_norm': 0.004678076992346691, 'learning_rate': 0.1578671296179806, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:43<22:13, 3.80s/it] 33%|███▎ | 170/520 [10:47<22:12, 3.81s/it] {'loss': 1.7729, 'grad_norm': 0.006028950321416496, 'learning_rate': 0.15735764363510463, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:47<22:12, 3.81s/it] 33%|███▎ | 171/520 [10:51<22:09, 3.81s/it] {'loss': 1.4864, 'grad_norm': 0.006724274204013939, 'learning_rate': 0.15684592907048925, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:51<22:09, 3.81s/it] 33%|███▎ | 172/520 [10:55<22:04, 3.81s/it] {'loss': 1.5429, 'grad_norm': 0.004496653698662051, 'learning_rate': 0.1563320058063622, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:55<22:04, 3.81s/it] 33%|███▎ | 173/520 [10:58<21:46, 3.76s/it] {'loss': 1.4727, 'grad_norm': 0.005007242417034523, 'learning_rate': 0.15581589381076844, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:58<21:46, 3.76s/it] 33%|███▎ | 174/520 [11:02<21:34, 3.74s/it] {'loss': 1.5646, 'grad_norm': 0.006500059779130542, 'learning_rate': 0.15529761313679394, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:02<21:34, 3.74s/it] 34%|███▎ | 175/520 [11:06<21:23, 3.72s/it] {'loss': 1.4569, 'grad_norm': 0.0046099889943425855, 'learning_rate': 0.15477718392178716, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:06<21:23, 3.72s/it] 34%|███▍ | 176/520 [11:09<21:17, 3.71s/it] {'loss': 1.8548, 'grad_norm': 0.005222558772496914, 'learning_rate': 0.15425462638657594, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:10<21:17, 3.71s/it] 34%|███▍ | 
177/520 [11:13<21:10, 3.71s/it] {'loss': 1.708, 'grad_norm': 0.00584846186131052, 'learning_rate': 0.1537299608346824, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:13<21:10, 3.71s/it] 34%|███▍ | 178/520 [11:17<21:06, 3.70s/it] {'loss': 1.5281, 'grad_norm': 0.005593815833158315, 'learning_rate': 0.15320320765153367, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:17<21:06, 3.70s/it] 34%|███▍ | 179/520 [11:21<20:59, 3.69s/it] {'loss': 1.6269, 'grad_norm': 0.004882181217236904, 'learning_rate': 0.15267438730367008, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:21<20:59, 3.69s/it] 35%|███▍ | 180/520 [11:24<20:52, 3.68s/it] {'loss': 1.5136, 'grad_norm': 0.005180636778198876, 'learning_rate': 0.1521435203379498, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:24<20:52, 3.68s/it] 35%|███▍ | 181/520 [11:28<20:49, 3.69s/it] {'loss': 1.4957, 'grad_norm': 0.005611009056668731, 'learning_rate': 0.15161062738075068, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:28<20:49, 3.69s/it] 35%|███▌ | 182/520 [11:32<20:41, 3.67s/it] {'loss': 1.5099, 'grad_norm': 0.005336615460546546, 'learning_rate': 0.1510757291371686, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:32<20:41, 3.67s/it] 35%|███▌ | 183/520 [11:35<20:36, 3.67s/it] {'loss': 1.5465, 'grad_norm': 0.005090340741663878, 'learning_rate': 0.1505388463902131, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:35<20:36, 3.67s/it] 35%|███▌ | 184/520 [11:39<20:33, 3.67s/it] {'loss': 1.4352, 'grad_norm': 0.005515504779715201, 'learning_rate': 0.15000000000000002, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:39<20:33, 3.67s/it] 36%|███▌ | 185/520 [11:43<20:26, 3.66s/it] {'loss': 1.644, 'grad_norm': 0.005873446421349778, 'learning_rate': 0.14945921090294076, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:43<20:26, 3.66s/it] 36%|███▌ | 186/520 [11:46<20:19, 3.65s/it] {'loss': 1.4488, 'grad_norm': 0.005406856189124581, 'learning_rate': 0.14891650011092894, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:46<20:19, 3.65s/it] 36%|███▌ | 187/520 [11:50<20:18, 3.66s/it] {'loss': 1.4712, 'grad_norm': 0.006360612430407432, 'learning_rate': 0.14837188871052398, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:50<20:18, 3.66s/it] 36%|███▌ | 188/520 [11:53<20:14, 3.66s/it] {'loss': 1.5355, 'grad_norm': 0.006165275986340844, 'learning_rate': 0.14782539786213184, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:53<20:14, 3.66s/it] 36%|███▋ | 189/520 [11:57<20:13, 3.67s/it] {'loss': 1.5719, 'grad_norm': 0.004255194464027671, 'learning_rate': 0.1472770487991827, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:57<20:13, 3.67s/it] 37%|███▋ | 190/520 [12:01<20:06, 3.66s/it] {'loss': 1.4587, 'grad_norm': 0.004623936050093595, 'learning_rate': 0.1467268628273062, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:01<20:06, 3.66s/it] 37%|███▋ | 191/520 [12:04<20:03, 3.66s/it] {'loss': 1.4294, 'grad_norm': 0.005234623759263242, 'learning_rate': 0.1461748613235034, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:04<20:03, 3.66s/it] 37%|███▋ | 192/520 [12:08<20:05, 3.67s/it] {'loss': 1.5344, 'grad_norm': 0.004740959283087698, 'learning_rate': 0.1456210657353163, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:08<20:05, 3.67s/it] 37%|███▋ | 193/520 [12:12<20:00, 3.67s/it] {'loss': 1.8239, 'grad_norm': 0.006327456116299247, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:12<20:00, 3.67s/it] 37%|███▋ | 194/520 [12:16<19:56, 3.67s/it] {'loss': 1.63, 'grad_norm': 0.004720682894709817, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:16<19:56, 3.67s/it] 38%|███▊ | 195/520 [12:19<19:54, 3.68s/it] {'loss': 1.5523, 'grad_norm': 0.006858014512449058, 
'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:19<19:54, 3.68s/it] 38%|███▊ | 196/520 [12:23<19:49, 3.67s/it] {'loss': 1.4924, 'grad_norm': 0.005994291670797836, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:23<19:49, 3.67s/it] 38%|███▊ | 197/520 [12:27<19:46, 3.67s/it] {'loss': 1.4727, 'grad_norm': 0.005207049378618791, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:27<19:46, 3.67s/it] 38%|███▊ | 198/520 [12:30<19:52, 3.70s/it] {'loss': 1.5659, 'grad_norm': 0.00649449171476192, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:30<19:52, 3.70s/it] 38%|███▊ | 199/520 [12:34<19:56, 3.73s/it] {'loss': 1.451, 'grad_norm': 0.004416150675005815, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:34<19:56, 3.73s/it] 38%|███▊ | 200/520 [12:38<19:57, 3.74s/it] {'loss': 1.7121, 'grad_norm': 0.005242631620507903, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:38<19:57, 3.74s/it] 39%|███▊ | 201/520 [12:42<19:56, 3.75s/it] {'loss': 1.6953, 'grad_norm': 0.005843073217194159, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:42<19:56, 3.75s/it] 39%|███▉ | 202/520 [12:45<19:53, 3.75s/it] {'loss': 1.4412, 'grad_norm': 0.005866862046379301, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:45<19:53, 3.75s/it] 39%|███▉ | 203/520 [12:49<19:51, 3.76s/it] {'loss': 1.4895, 'grad_norm': 0.004708765270660171, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:49<19:51, 3.76s/it] 39%|███▉ | 204/520 [12:53<19:49, 3.76s/it] {'loss': 1.5522, 'grad_norm': 0.007870380023818145, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:53<19:49, 3.76s/it] 39%|███▉ | 205/520 [12:57<19:50, 3.78s/it] {'loss': 1.7449, 'grad_norm': 0.008563373658367594, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:57<19:50, 3.78s/it] 40%|███▉ | 206/520 [13:01<19:44, 3.77s/it] {'loss': 1.595, 'grad_norm': 0.0059781334373005405, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:01<19:44, 3.77s/it] 40%|███▉ | 207/520 [13:04<19:39, 3.77s/it] {'loss': 1.6962, 'grad_norm': 0.005272529628922663, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:04<19:39, 3.77s/it] 40%|████ | 208/520 [13:08<19:35, 3.77s/it] {'loss': 1.5438, 'grad_norm': 0.006079078156521035, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:08<19:35, 3.77s/it] 40%|████ | 209/520 [13:12<19:31, 3.77s/it] {'loss': 1.4659, 'grad_norm': 0.005870954859585203, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:12<19:31, 3.77s/it] 40%|████ | 210/520 [13:16<19:29, 3.77s/it] {'loss': 1.5348, 'grad_norm': 0.004696248629626464, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:16<19:29, 3.77s/it] 41%|████ | 211/520 [13:19<19:26, 3.78s/it] {'loss': 1.5509, 'grad_norm': 0.005030473889990745, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 [13:19<19:26, 3.78s/it] 41%|████ | 212/520 [13:23<19:21, 3.77s/it] {'loss': 1.5058, 'grad_norm': 0.004372598532849816, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:23<19:21, 3.77s/it] 41%|████ | 213/520 [13:27<19:18, 3.77s/it] {'loss': 1.4684, 'grad_norm': 0.005494738993424628, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:27<19:18, 
3.77s/it] 41%|████ | 214/520 [13:31<19:12, 3.77s/it] {'loss': 1.4855, 'grad_norm': 0.0050982480772078995, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:31<19:12, 3.77s/it] 41%|████▏ | 215/520 [13:34<19:06, 3.76s/it] {'loss': 1.5819, 'grad_norm': 0.005078672529285661, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:34<19:06, 3.76s/it] 42%|████▏ | 216/520 [13:38<19:00, 3.75s/it] {'loss': 1.3659, 'grad_norm': 0.004759956812130988, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:38<19:00, 3.75s/it] 42%|████▏ | 217/520 [13:42<18:48, 3.73s/it] {'loss': 1.5026, 'grad_norm': 0.00536709549824844, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:42<18:48, 3.73s/it] 42%|████▏ | 218/520 [13:45<18:36, 3.70s/it] {'loss': 1.5147, 'grad_norm': 0.006139437216316229, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:45<18:36, 3.70s/it] 42%|████▏ | 219/520 [13:49<18:26, 3.68s/it] {'loss': 1.4655, 'grad_norm': 0.004321637156239748, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:49<18:26, 3.68s/it] 42%|████▏ | 220/520 [13:53<18:19, 3.66s/it] {'loss': 1.6653, 'grad_norm': 0.006761593395294477, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:53<18:19, 3.66s/it] 42%|████▎ | 221/520 [13:56<18:13, 3.66s/it] {'loss': 1.5168, 'grad_norm': 0.004434021720392543, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:56<18:13, 3.66s/it] 43%|████▎ | 222/520 [14:00<18:08, 3.65s/it] {'loss': 1.4052, 'grad_norm': 0.006266366538919137, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:00<18:08, 3.65s/it] 43%|████▎ | 223/520 [14:04<18:10, 3.67s/it] {'loss': 1.392, 'grad_norm': 0.004225673096804953, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:04<18:10, 3.67s/it] 43%|████▎ | 224/520 [14:07<18:17, 3.71s/it] {'loss': 2.0694, 'grad_norm': 0.008830602863294939, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:07<18:17, 3.71s/it] 43%|████▎ | 225/520 [14:11<18:28, 3.76s/it] {'loss': 1.415, 'grad_norm': 0.005079960452792066, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:11<18:28, 3.76s/it] 43%|████▎ | 226/520 [14:15<18:33, 3.79s/it] {'loss': 1.5218, 'grad_norm': 0.005355901147669869, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:15<18:33, 3.79s/it] 44%|████▎ | 227/520 [14:19<18:40, 3.82s/it] {'loss': 1.514, 'grad_norm': 0.004628801055182253, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:19<18:40, 3.82s/it] 44%|████▍ | 228/520 [14:23<18:41, 3.84s/it] {'loss': 1.8531, 'grad_norm': 0.008204216058677008, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:23<18:41, 3.84s/it] 44%|████▍ | 229/520 [14:27<18:41, 3.85s/it] {'loss': 1.4969, 'grad_norm': 0.0045501865780327045, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:27<18:41, 3.85s/it] 44%|████▍ | 230/520 [14:31<18:39, 3.86s/it] {'loss': 1.356, 'grad_norm': 0.004864488048368325, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:31<18:39, 3.86s/it] 44%|████▍ | 231/520 [14:35<18:35, 3.86s/it] {'loss': 1.4189, 'grad_norm': 0.0052245373529068145, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:35<18:35, 3.86s/it] 45%|████▍ | 232/520 [14:39<18:32, 3.86s/it] 
{'loss': 1.8664, 'grad_norm': 0.005635067298097221, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:39<18:32, 3.86s/it] 45%|████▍ | 233/520 [14:42<18:32, 3.88s/it] {'loss': 1.6892, 'grad_norm': 0.005159719227251007, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:42<18:32, 3.88s/it] 45%|████▌ | 234/520 [14:46<18:26, 3.87s/it] {'loss': 1.361, 'grad_norm': 0.004855357943656436, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:46<18:26, 3.87s/it] 45%|████▌ | 235/520 [14:50<18:24, 3.88s/it] {'loss': 1.4119, 'grad_norm': 0.005583716016056002, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:50<18:24, 3.88s/it] 45%|████▌ | 236/520 [14:54<18:19, 3.87s/it] {'loss': 1.5688, 'grad_norm': 0.005273725614797457, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:54<18:19, 3.87s/it] 46%|████▌ | 237/520 [14:58<18:16, 3.88s/it] {'loss': 1.4974, 'grad_norm': 0.004438500566302966, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:58<18:16, 3.88s/it] 46%|████▌ | 238/520 [15:02<18:11, 3.87s/it] {'loss': 1.4507, 'grad_norm': 0.004550261383026967, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:02<18:11, 3.87s/it] 46%|████▌ | 239/520 [15:06<18:07, 3.87s/it] {'loss': 1.5824, 'grad_norm': 0.005437775241500689, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:06<18:07, 3.87s/it] 46%|████▌ | 240/520 [15:10<18:04, 3.87s/it] {'loss': 1.2909, 'grad_norm': 0.004252468064428511, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:10<18:04, 3.87s/it] 46%|████▋ | 241/520 [15:13<18:03, 3.88s/it] {'loss': 1.3761, 'grad_norm': 0.005051137724892687, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:13<18:03, 3.88s/it] 47%|████▋ | 242/520 [15:17<18:00, 3.89s/it] {'loss': 1.4171, 'grad_norm': 0.003961369534115254, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:17<18:00, 3.89s/it] 47%|████▋ | 243/520 [15:21<17:57, 3.89s/it] {'loss': 1.3974, 'grad_norm': 0.004514244986697117, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:21<17:57, 3.89s/it] 47%|████▋ | 244/520 [15:25<17:50, 3.88s/it] {'loss': 1.5618, 'grad_norm': 0.006728492350056366, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:25<17:50, 3.88s/it] 47%|████▋ | 245/520 [15:29<17:47, 3.88s/it] {'loss': 1.397, 'grad_norm': 0.004766444780800696, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:29<17:47, 3.88s/it] 47%|████▋ | 246/520 [15:33<17:44, 3.88s/it] {'loss': 1.8161, 'grad_norm': 0.005020953807155898, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:33<17:44, 3.88s/it] 48%|████▊ | 247/520 [15:37<17:38, 3.88s/it] {'loss': 1.5715, 'grad_norm': 0.004707602747454531, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:37<17:38, 3.88s/it] 48%|████▊ | 248/520 [15:41<17:31, 3.87s/it] {'loss': 1.3884, 'grad_norm': 0.004688928759520729, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:41<17:31, 3.87s/it] 48%|████▊ | 249/520 [15:44<17:27, 3.87s/it] {'loss': 1.5088, 'grad_norm': 0.004677125152709002, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:44<17:27, 3.87s/it] 48%|████▊ | 250/520 [15:48<17:27, 3.88s/it] {'loss': 1.4615, 'grad_norm': 
0.0050624494209059595, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:48<17:27, 3.88s/it] 48%|████▊ | 251/520 [15:52<17:20, 3.87s/it] {'loss': 1.5062, 'grad_norm': 0.0045970027455701145, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:52<17:20, 3.87s/it] 48%|████▊ | 252/520 [15:56<17:18, 3.88s/it] {'loss': 1.661, 'grad_norm': 0.005426225948022496, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:56<17:18, 3.88s/it] 49%|████▊ | 253/520 [16:00<17:08, 3.85s/it] {'loss': 1.5211, 'grad_norm': 0.005077185009530873, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:00<17:08, 3.85s/it] 49%|████▉ | 254/520 [16:04<17:00, 3.84s/it] {'loss': 1.4029, 'grad_norm': 0.003895768854853133, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:04<17:00, 3.84s/it] 49%|████▉ | 255/520 [16:07<16:52, 3.82s/it] {'loss': 1.4254, 'grad_norm': 0.004947330350315361, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:07<16:52, 3.82s/it] 49%|████▉ | 256/520 [16:11<16:46, 3.81s/it] {'loss': 1.4545, 'grad_norm': 0.0044449317314297606, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:11<16:46, 3.81s/it] 49%|████▉ | 257/520 [16:15<16:40, 3.80s/it] {'loss': 1.4755, 'grad_norm': 0.005224520296072854, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:15<16:40, 3.80s/it] 50%|████▉ | 258/520 [16:19<16:27, 3.77s/it] {'loss': 1.4906, 'grad_norm': 0.0038645973769228905, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:19<16:27, 3.77s/it] 50%|████▉ | 259/520 [16:22<16:13, 3.73s/it] {'loss': 1.543, 'grad_norm': 0.004747128219125119, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:22<16:13, 3.73s/it] 50%|█████ | 260/520 [16:26<16:04, 3.71s/it] {'loss': 1.8019, 'grad_norm': 0.005381531093318386, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:26<16:04, 3.71s/it] 50%|█████ | 261/520 [16:30<15:57, 3.70s/it] {'loss': 1.6826, 'grad_norm': 0.004851943139445493, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:30<15:57, 3.70s/it] 50%|█████ | 262/520 [16:33<15:51, 3.69s/it] {'loss': 1.3845, 'grad_norm': 0.004230908439899085, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:33<15:51, 3.69s/it] 51%|█████ | 263/520 [16:37<15:46, 3.68s/it] {'loss': 1.7038, 'grad_norm': 0.004908878237108632, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:37<15:46, 3.68s/it] 51%|█████ | 264/520 [16:41<15:40, 3.67s/it] {'loss': 1.5145, 'grad_norm': 0.004304562920616779, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:41<15:40, 3.67s/it] 51%|█████ | 265/520 [16:44<15:38, 3.68s/it] {'loss': 1.3808, 'grad_norm': 0.004835498443609046, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:44<15:38, 3.68s/it] 51%|█████ | 266/520 [16:48<15:34, 3.68s/it] {'loss': 1.2249, 'grad_norm': 0.004262462790131312, 'learning_rate': 0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:48<15:34, 3.68s/it] 51%|█████▏ | 267/520 [16:52<15:30, 3.68s/it] {'loss': 1.3853, 'grad_norm': 0.004396905062063041, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:52<15:30, 3.68s/it] 52%|█████▏ | 268/520 [16:55<15:23, 3.67s/it] {'loss': 1.8234, 'grad_norm': 0.007280353827569198, 'learning_rate': 0.1, 
'epoch': 0.52} + 52%|█████▏ | 268/520 [16:55<15:23, 3.67s/it] 52%|█████▏ | 269/520 [16:59<15:20, 3.67s/it] {'loss': 1.5035, 'grad_norm': 0.0044488066619471, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:59<15:20, 3.67s/it] 52%|█████▏ | 270/520 [17:03<15:17, 3.67s/it] {'loss': 1.5853, 'grad_norm': 0.004975604696329886, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:03<15:17, 3.67s/it] 52%|█████▏ | 271/520 [17:06<15:13, 3.67s/it] {'loss': 1.503, 'grad_norm': 0.004373757294463081, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:06<15:13, 3.67s/it] 52%|█████▏ | 272/520 [17:10<15:09, 3.67s/it] {'loss': 1.5972, 'grad_norm': 0.005835936740283574, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:10<15:09, 3.67s/it] 52%|█████▎ | 273/520 [17:14<15:05, 3.66s/it] {'loss': 1.7299, 'grad_norm': 0.0057404995063403625, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:14<15:05, 3.66s/it] 53%|█████▎ | 274/520 [17:17<15:02, 3.67s/it] {'loss': 1.4345, 'grad_norm': 0.004463744035273109, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:17<15:02, 3.67s/it] 53%|█████▎ | 275/520 [17:21<14:57, 3.66s/it] {'loss': 1.3777, 'grad_norm': 0.005409522160480483, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:21<14:57, 3.66s/it] 53%|█████▎ | 276/520 [17:25<14:53, 3.66s/it] {'loss': 1.483, 'grad_norm': 0.005095623613911461, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:25<14:53, 3.66s/it] 53%|█████▎ | 277/520 [17:28<14:51, 3.67s/it] {'loss': 1.6936, 'grad_norm': 0.004476616676618979, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:28<14:51, 3.67s/it] 53%|█████▎ | 278/520 [17:32<14:48, 3.67s/it] {'loss': 1.3337, 'grad_norm': 0.004023674311010456, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:32<14:48, 3.67s/it] 54%|█████▎ | 279/520 [17:36<14:43, 3.66s/it] {'loss': 1.6359, 'grad_norm': 0.005639924813113334, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:36<14:43, 3.66s/it] 54%|█████▍ | 280/520 [17:39<14:42, 3.68s/it] {'loss': 1.3889, 'grad_norm': 0.005605496965847119, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:39<14:42, 3.68s/it] 54%|█████▍ | 281/520 [17:43<14:37, 3.67s/it] {'loss': 1.5118, 'grad_norm': 0.004809619994922637, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:43<14:37, 3.67s/it] 54%|█████▍ | 282/520 [17:47<14:34, 3.67s/it] {'loss': 1.3445, 'grad_norm': 0.00424491093537938, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:47<14:34, 3.67s/it] 54%|█████▍ | 283/520 [17:50<14:31, 3.68s/it] {'loss': 1.5518, 'grad_norm': 0.0058750627494683965, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:50<14:31, 3.68s/it] 55%|█████▍ | 284/520 [17:54<14:28, 3.68s/it] {'loss': 1.5766, 'grad_norm': 0.005818943117420554, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:54<14:28, 3.68s/it] 55%|█████▍ | 285/520 [17:58<14:26, 3.69s/it] {'loss': 1.3837, 'grad_norm': 0.004408291857252947, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:58<14:26, 3.69s/it] 55%|█████▌ | 286/520 [18:01<14:20, 3.68s/it] {'loss': 1.2245, 'grad_norm': 0.0046588321240800664, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} 
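Each tqdm fragment in these records reads done/total [elapsed<remaining, rate], and the remaining-time field is just (total − done) × rate. A quick check against the step-286 fragment above, with the numbers copied from the log (tqdm smooths the rate, so the displayed ETA can differ by a second or two):

```python
done, total, rate = 286, 520, 3.68   # from "286/520 [18:01<14:20, 3.68s/it]" above
eta = (total - done) * rate          # 234 * 3.68 = 861.1 s remaining
print(f"{int(eta // 60)}:{int(eta % 60):02d}")  # -> 14:21, vs. 14:20 displayed
```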
+ 55%|█████▌ | 286/520 [18:01<14:20, 3.68s/it] 55%|█████▌ | 287/520 [18:05<14:16, 3.68s/it] {'loss': 1.493, 'grad_norm': 0.004634654043512574, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:05<14:16, 3.68s/it] 55%|█████▌ | 288/520 [18:09<14:15, 3.69s/it] {'loss': 1.5702, 'grad_norm': 0.005187460002755091, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:09<14:15, 3.69s/it] 56%|█████▌ | 289/520 [18:13<14:12, 3.69s/it] {'loss': 1.3915, 'grad_norm': 0.0040872545032540905, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:13<14:12, 3.69s/it] 56%|█████▌ | 290/520 [18:16<14:08, 3.69s/it] {'loss': 1.3035, 'grad_norm': 0.00385918635379397, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:16<14:08, 3.69s/it] 56%|█████▌ | 291/520 [18:20<14:05, 3.69s/it] {'loss': 1.3658, 'grad_norm': 0.004999064314035488, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:20<14:05, 3.69s/it] 56%|█████▌ | 292/520 [18:24<13:59, 3.68s/it] {'loss': 1.4213, 'grad_norm': 0.004092334097176476, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:24<13:59, 3.68s/it] 56%|█████▋ | 293/520 [18:27<13:55, 3.68s/it] {'loss': 1.3494, 'grad_norm': 0.004469256540706159, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:27<13:55, 3.68s/it] 57%|█████▋ | 294/520 [18:31<13:52, 3.68s/it] {'loss': 1.3902, 'grad_norm': 0.004323494548163721, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:31<13:52, 3.68s/it] 57%|█████▋ | 295/520 [18:35<13:47, 3.68s/it] {'loss': 1.6897, 'grad_norm': 0.007182946316370223, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:35<13:47, 3.68s/it] 57%|█████▋ | 296/520 [18:38<13:43, 3.68s/it] {'loss': 1.3199, 'grad_norm': 0.004511306164867966, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:38<13:43, 3.68s/it] 57%|█████▋ | 297/520 [18:42<13:39, 3.68s/it] {'loss': 1.4865, 'grad_norm': 0.004619954220579029, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:42<13:39, 3.68s/it] 57%|█████▋ | 298/520 [18:46<13:35, 3.68s/it] {'loss': 1.4329, 'grad_norm': 0.0038740398120755753, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:46<13:35, 3.68s/it] 57%|█████▊ | 299/520 [18:49<13:34, 3.69s/it] {'loss': 1.6456, 'grad_norm': 0.005345937618449538, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:49<13:34, 3.69s/it] 58%|█████▊ | 300/520 [18:53<13:28, 3.67s/it] {'loss': 1.5025, 'grad_norm': 0.004238630759198192, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:53<13:28, 3.67s/it] 58%|█████▊ | 301/520 [18:57<13:24, 3.67s/it] {'loss': 1.4641, 'grad_norm': 0.0044134684251256025, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:57<13:24, 3.67s/it] 58%|█████▊ | 302/520 [19:00<13:20, 3.67s/it] {'loss': 1.6557, 'grad_norm': 0.004740502077480567, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:00<13:20, 3.67s/it] 58%|█████▊ | 303/520 [19:04<13:17, 3.68s/it] {'loss': 1.3935, 'grad_norm': 0.00470366131443795, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:04<13:17, 3.68s/it] 58%|█████▊ | 304/520 [19:08<13:17, 3.69s/it] {'loss': 1.5625, 'grad_norm': 0.004782878357015417, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 
58%|█████▊ | 304/520 [19:08<13:17, 3.69s/it] 59%|█████▊ | 305/520 [19:11<13:11, 3.68s/it] {'loss': 1.5369, 'grad_norm': 0.005360344524141794, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:11<13:11, 3.68s/it] 59%|█████▉ | 306/520 [19:15<13:07, 3.68s/it] {'loss': 1.4343, 'grad_norm': 0.004850859300858403, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:15<13:07, 3.68s/it] 59%|█████▉ | 307/520 [19:19<13:23, 3.77s/it] {'loss': 1.3824, 'grad_norm': 0.004748910206483882, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:19<13:23, 3.77s/it] 59%|█████▉ | 308/520 [19:23<13:13, 3.74s/it] {'loss': 1.4936, 'grad_norm': 0.0045212363922698786, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:23<13:13, 3.74s/it] 59%|█████▉ | 309/520 [19:26<13:05, 3.72s/it] {'loss': 1.3722, 'grad_norm': 0.004339888162866092, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:26<13:05, 3.72s/it] 60%|█████▉ | 310/520 [19:30<13:00, 3.72s/it] {'loss': 1.3496, 'grad_norm': 0.0044452158900883465, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:30<13:00, 3.72s/it] 60%|█████▉ | 311/520 [19:34<12:54, 3.70s/it] {'loss': 1.3048, 'grad_norm': 0.0041656479806191, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:34<12:54, 3.70s/it] 60%|██████ | 312/520 [19:38<12:51, 3.71s/it] {'loss': 1.2876, 'grad_norm': 0.004490617625348692, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:38<12:51, 3.71s/it] 60%|██████ | 313/520 [19:41<12:46, 3.70s/it] {'loss': 1.2923, 'grad_norm': 0.004107253763559353, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:41<12:46, 3.70s/it] 60%|██████ | 314/520 [19:45<13:06, 3.82s/it] {'loss': 1.3201, 'grad_norm': 0.004941761609838417, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:45<13:06, 3.82s/it] 61%|██████ | 315/520 [19:49<12:58, 3.80s/it] {'loss': 1.7028, 'grad_norm': 0.009766309670849502, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:49<12:58, 3.80s/it] 61%|██████ | 316/520 [19:53<13:15, 3.90s/it] {'loss': 1.2953, 'grad_norm': 0.005681166088503622, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:53<13:15, 3.90s/it] 61%|██████ | 317/520 [19:57<12:58, 3.84s/it] {'loss': 1.3282, 'grad_norm': 0.004113486154060274, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [19:57<12:58, 3.84s/it] 61%|██████ | 318/520 [20:01<12:45, 3.79s/it] {'loss': 1.4575, 'grad_norm': 0.004749601302337836, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:01<12:45, 3.79s/it] 61%|██████▏ | 319/520 [20:05<12:55, 3.86s/it] {'loss': 1.3164, 'grad_norm': 0.0044820733532301, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:05<12:55, 3.86s/it] 62%|██████▏ | 320/520 [20:08<12:40, 3.80s/it] {'loss': 1.2607, 'grad_norm': 0.005058138510131723, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:08<12:40, 3.80s/it] 62%|██████▏ | 321/520 [20:12<12:28, 3.76s/it] {'loss': 1.4833, 'grad_norm': 0.004967329879023514, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:12<12:28, 3.76s/it] 62%|██████▏ | 322/520 [20:16<12:18, 3.73s/it] {'loss': 1.4827, 'grad_norm': 0.005210492174950859, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ 
| 322/520 [20:16<12:18, 3.73s/it] 62%|██████▏ | 323/520 [20:19<12:09, 3.70s/it] {'loss': 1.5856, 'grad_norm': 0.005335951521559629, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:19<12:09, 3.70s/it] 62%|██████▏ | 324/520 [20:23<12:05, 3.70s/it] {'loss': 1.3931, 'grad_norm': 0.0050526456422162235, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:23<12:05, 3.70s/it] 62%|██████▎ | 325/520 [20:27<12:08, 3.74s/it] {'loss': 1.4137, 'grad_norm': 0.004714838379895745, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:27<12:08, 3.74s/it] 63%|██████▎ | 326/520 [20:31<12:11, 3.77s/it] {'loss': 1.3743, 'grad_norm': 0.004203945761439289, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:31<12:11, 3.77s/it] 63%|██████▎ | 327/520 [20:34<12:10, 3.78s/it] {'loss': 1.6673, 'grad_norm': 0.005594353464021365, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:34<12:10, 3.78s/it] 63%|██████▎ | 328/520 [20:38<12:09, 3.80s/it] {'loss': 1.4731, 'grad_norm': 0.00457874088744891, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:38<12:09, 3.80s/it] 63%|██████▎ | 329/520 [20:42<12:07, 3.81s/it] {'loss': 1.3029, 'grad_norm': 0.0037699437863014298, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:42<12:07, 3.81s/it] 63%|██████▎ | 330/520 [20:46<12:04, 3.81s/it] {'loss': 1.3785, 'grad_norm': 0.003978667401472227, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:46<12:04, 3.81s/it] 64%|██████▎ | 331/520 [20:50<12:02, 3.82s/it] {'loss': 1.3544, 'grad_norm': 0.004252570333982839, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:50<12:02, 3.82s/it] 64%|██████▍ | 332/520 [20:54<11:58, 3.82s/it] {'loss': 1.6494, 'grad_norm': 0.004615861694948321, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:54<11:58, 3.82s/it] 64%|██████▍ | 333/520 [20:57<11:55, 3.83s/it] {'loss': 1.5405, 'grad_norm': 0.004919072822335319, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:57<11:55, 3.83s/it] 64%|██████▍ | 334/520 [21:01<11:50, 3.82s/it] {'loss': 1.3989, 'grad_norm': 0.004994338742970172, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:01<11:50, 3.82s/it] 64%|██████▍ | 335/520 [21:05<11:45, 3.82s/it] {'loss': 1.3903, 'grad_norm': 0.003920096998569975, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:05<11:45, 3.82s/it] 65%|██████▍ | 336/520 [21:09<11:42, 3.82s/it] {'loss': 1.2683, 'grad_norm': 0.00514893166073426, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:09<11:42, 3.82s/it] 65%|██████▍ | 337/520 [21:13<11:37, 3.81s/it] {'loss': 1.2655, 'grad_norm': 0.004706211351409073, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:13<11:37, 3.81s/it] 65%|██████▌ | 338/520 [21:16<11:34, 3.82s/it] {'loss': 1.4153, 'grad_norm': 0.0047219458616035515, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:16<11:34, 3.82s/it] 65%|██████▌ | 339/520 [21:20<11:32, 3.82s/it] {'loss': 1.3383, 'grad_norm': 0.004202325873872259, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:20<11:32, 3.82s/it] 65%|██████▌ | 340/520 [21:24<11:28, 3.82s/it] {'loss': 1.3248, 'grad_norm': 0.004380090573362136, 'learning_rate': 
0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:24<11:28, 3.82s/it] 66%|██████▌ | 341/520 [21:28<11:24, 3.83s/it] {'loss': 1.355, 'grad_norm': 0.0045671938200312085, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:28<11:24, 3.83s/it] 66%|██████▌ | 342/520 [21:32<11:20, 3.82s/it] {'loss': 1.6116, 'grad_norm': 0.00552262974014909, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:32<11:20, 3.82s/it] 66%|██████▌ | 343/520 [21:36<11:16, 3.82s/it] {'loss': 1.588, 'grad_norm': 0.005523174510513402, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:36<11:16, 3.82s/it] 66%|██████▌ | 344/520 [21:39<11:13, 3.82s/it] {'loss': 1.3019, 'grad_norm': 0.003984262734570954, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:39<11:13, 3.82s/it] 66%|██████▋ | 345/520 [21:43<11:09, 3.82s/it] {'loss': 1.4268, 'grad_norm': 0.004627201970431191, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:43<11:09, 3.82s/it] 67%|██████▋ | 346/520 [21:47<11:05, 3.83s/it] {'loss': 1.5397, 'grad_norm': 0.004641721620531383, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:47<11:05, 3.83s/it] 67%|██████▋ | 347/520 [21:51<11:01, 3.83s/it] {'loss': 1.3073, 'grad_norm': 0.0038637246448824055, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:51<11:01, 3.83s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:55<10:59, 3.83s/it] {'loss': 1.2888, 'grad_norm': 0.005937355776076077, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:55<10:59, 3.83s/it] 67%|██████▋ | 349/520 [21:59<10:56, 3.84s/it] {'loss': 1.3297, 'grad_norm': 0.004373237911561925, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:59<10:56, 3.84s/it] 67%|██████▋ | 350/520 [22:02<10:53, 3.84s/it] {'loss': 1.3576, 'grad_norm': 0.004645078537842775, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:02<10:53, 3.84s/it] 68%|██████▊ | 351/520 [22:06<10:48, 3.84s/it] {'loss': 1.2522, 'grad_norm': 0.003996012946950295, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:06<10:48, 3.84s/it] 68%|██████▊ | 352/520 [22:10<10:44, 3.84s/it] {'loss': 1.3878, 'grad_norm': 0.004373144424459936, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:10<10:44, 3.84s/it] 68%|██████▊ | 353/520 [22:14<10:39, 3.83s/it] {'loss': 1.4941, 'grad_norm': 0.004035072758198736, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:14<10:39, 3.83s/it] 68%|██████▊ | 354/520 [22:18<10:34, 3.82s/it] {'loss': 1.6587, 'grad_norm': 0.004714748746666412, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:18<10:34, 3.82s/it] 68%|██████▊ | 355/520 [22:22<10:29, 3.82s/it] {'loss': 1.326, 'grad_norm': 0.004298251614667608, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:22<10:29, 3.82s/it] 68%|██████▊ | 356/520 [22:25<10:28, 3.83s/it] {'loss': 1.3252, 'grad_norm': 0.004126497788738811, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:25<10:28, 3.83s/it] 69%|██████▊ | 357/520 [22:29<10:18, 3.80s/it] {'loss': 1.3344, 'grad_norm': 0.0038730551075288463, 
'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:29<10:18, 3.80s/it] 69%|██████▉ | 358/520 [22:33<10:10, 3.77s/it] {'loss': 1.2691, 'grad_norm': 0.004103067697958093, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:33<10:10, 3.77s/it] 69%|██████▉ | 359/520 [22:36<10:01, 3.74s/it] {'loss': 1.57, 'grad_norm': 0.00514575872230217, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:36<10:01, 3.74s/it] 69%|██████▉ | 360/520 [22:40<09:54, 3.72s/it] {'loss': 1.5846, 'grad_norm': 0.005284673486504908, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:40<09:54, 3.72s/it] 69%|██████▉ | 361/520 [22:44<09:50, 3.72s/it] {'loss': 1.5694, 'grad_norm': 0.004279961256348576, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:44<09:50, 3.72s/it] 70%|██████▉ | 362/520 [22:48<09:44, 3.70s/it] {'loss': 1.3222, 'grad_norm': 0.004629687348190401, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:48<09:44, 3.70s/it] 70%|██████▉ | 363/520 [22:51<09:39, 3.69s/it] {'loss': 1.3767, 'grad_norm': 0.004385152788110813, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:51<09:39, 3.69s/it] 70%|███████ | 364/520 [22:55<09:36, 3.69s/it] {'loss': 1.5769, 'grad_norm': 0.004959888945771997, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:55<09:36, 3.69s/it] 70%|███████ | 365/520 [22:59<09:30, 3.68s/it] {'loss': 1.4486, 'grad_norm': 0.004714353578537868, 'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [22:59<09:30, 3.68s/it] 70%|███████ | 366/520 [23:02<09:27, 3.68s/it] {'loss': 1.3816, 'grad_norm': 0.003996343100998294, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:02<09:27, 3.68s/it] 71%|███████ | 367/520 [23:06<09:24, 3.69s/it] {'loss': 1.3972, 'grad_norm': 0.004277352827710586, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:06<09:24, 3.69s/it] 71%|███████ | 368/520 [23:10<09:21, 3.69s/it] {'loss': 1.2341, 'grad_norm': 0.004210341914505817, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:10<09:21, 3.69s/it] 71%|███████ | 369/520 [23:13<09:17, 3.69s/it] {'loss': 1.5381, 'grad_norm': 0.004573546087597649, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:13<09:17, 3.69s/it] 71%|███████ | 370/520 [23:17<09:13, 3.69s/it] {'loss': 1.2851, 'grad_norm': 0.004240279614529956, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:17<09:13, 3.69s/it] 71%|███████▏ | 371/520 [23:21<09:10, 3.69s/it] {'loss': 1.287, 'grad_norm': 0.004584763621030996, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:21<09:10, 3.69s/it] 72%|███████▏ | 372/520 [23:24<09:06, 3.69s/it] {'loss': 1.6504, 'grad_norm': 0.004494966564917725, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:24<09:06, 3.69s/it] 72%|███████▏ | 373/520 [23:28<09:05, 3.71s/it] {'loss': 1.5155, 'grad_norm': 0.004918370512033141, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:28<09:05, 3.71s/it] 72%|███████▏ | 374/520 [23:32<09:02, 3.72s/it] {'loss': 1.3669, 'grad_norm': 0.00425979218051233, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:32<09:02, 3.72s/it] 72%|███████▏ | 375/520 [23:36<08:57, 3.71s/it] {'loss': 1.2724, 
'grad_norm': 0.0044051133263169, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:36<08:57, 3.71s/it] 72%|███████▏ | 376/520 [23:39<08:53, 3.71s/it] {'loss': 1.4112, 'grad_norm': 0.00413639920127433, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:39<08:53, 3.71s/it] 72%|███████▎ | 377/520 [23:43<08:48, 3.70s/it] {'loss': 1.3468, 'grad_norm': 0.005227935625035673, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:43<08:48, 3.70s/it] 73%|███████▎ | 378/520 [23:47<08:46, 3.71s/it] {'loss': 1.396, 'grad_norm': 0.003905284898604886, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:47<08:46, 3.71s/it] 73%|███████▎ | 379/520 [23:50<08:40, 3.69s/it] {'loss': 1.3857, 'grad_norm': 0.003948824550022576, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:50<08:40, 3.69s/it] 73%|███████▎ | 380/520 [23:54<08:36, 3.69s/it] {'loss': 1.6468, 'grad_norm': 0.005163026799524934, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:54<08:36, 3.69s/it] 73%|███████▎ | 381/520 [23:58<08:33, 3.70s/it] {'loss': 1.373, 'grad_norm': 0.0044514422626414476, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:58<08:33, 3.70s/it] 73%|███████▎ | 382/520 [24:01<08:29, 3.69s/it] {'loss': 1.5555, 'grad_norm': 0.005015924044385292, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:01<08:29, 3.69s/it] 74%|███████▎ | 383/520 [24:05<08:24, 3.68s/it] {'loss': 1.2076, 'grad_norm': 0.004479959191072855, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:05<08:24, 3.68s/it] 74%|███████▍ | 384/520 [24:09<08:18, 3.67s/it] {'loss': 1.7738, 'grad_norm': 0.004519975510259663, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:09<08:18, 3.67s/it] 74%|███████▍ | 385/520 [24:12<08:13, 3.65s/it] {'loss': 1.3592, 'grad_norm': 0.004252407625438295, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:12<08:13, 3.65s/it] 74%|███████▍ | 386/520 [24:16<08:08, 3.65s/it] {'loss': 1.2848, 'grad_norm': 0.004274971872818381, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:16<08:08, 3.65s/it] 74%|███████▍ | 387/520 [24:20<08:05, 3.65s/it] {'loss': 1.6565, 'grad_norm': 0.004557483615668021, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:20<08:05, 3.65s/it] 75%|███████▍ | 388/520 [24:23<08:02, 3.65s/it] {'loss': 1.2341, 'grad_norm': 0.0039197856493329635, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:23<08:02, 3.65s/it] 75%|███████▍ | 389/520 [24:27<07:58, 3.65s/it] {'loss': 1.3011, 'grad_norm': 0.005633653746134059, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:27<07:58, 3.65s/it] 75%|███████▌ | 390/520 [24:31<07:54, 3.65s/it] {'loss': 1.3704, 'grad_norm': 0.0038661876177465463, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:31<07:54, 3.65s/it] 75%|███████▌ | 391/520 [24:34<07:56, 3.69s/it] {'loss': 1.4699, 'grad_norm': 0.004327108350067734, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:34<07:56, 3.69s/it] 75%|███████▌ | 392/520 [24:38<08:01, 3.77s/it] {'loss': 1.2622, 'grad_norm': 0.004250849813571159, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:38<08:01, 
3.77s/it] 76%|███████▌ | 393/520 [24:42<08:02, 3.80s/it] {'loss': 1.4114, 'grad_norm': 0.004006677261933739, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:42<08:02, 3.80s/it] 76%|███████▌ | 394/520 [24:46<07:57, 3.79s/it] {'loss': 1.3306, 'grad_norm': 0.004796758452221585, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:46<07:57, 3.79s/it] 76%|███████▌ | 395/520 [24:50<07:55, 3.80s/it] {'loss': 1.2776, 'grad_norm': 0.004461813817136512, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:50<07:55, 3.80s/it] 76%|███████▌ | 396/520 [24:54<07:50, 3.79s/it] {'loss': 1.3597, 'grad_norm': 0.004254600448674858, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:54<07:50, 3.79s/it] 76%|███████▋ | 397/520 [24:57<07:48, 3.81s/it] {'loss': 1.371, 'grad_norm': 0.003950496533938901, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:57<07:48, 3.81s/it] 77%|███████▋ | 398/520 [25:01<07:44, 3.81s/it] {'loss': 1.341, 'grad_norm': 0.004213003087998386, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:01<07:44, 3.81s/it] 77%|███████▋ | 399/520 [25:05<07:41, 3.81s/it] {'loss': 1.4694, 'grad_norm': 0.004875300838710812, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:05<07:41, 3.81s/it] 77%|███████▋ | 400/520 [25:09<07:40, 3.84s/it] {'loss': 1.5339, 'grad_norm': 0.004381147820795206, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:09<07:40, 3.84s/it] 77%|███████▋ | 401/520 [25:13<07:40, 3.87s/it] {'loss': 1.1444, 'grad_norm': 0.004418003986907506, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:13<07:40, 3.87s/it] 77%|███████▋ | 402/520 [25:17<07:39, 3.89s/it] {'loss': 1.2781, 'grad_norm': 0.004393083061635238, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:17<07:39, 3.89s/it] 78%|███████▊ | 403/520 [25:21<07:37, 3.91s/it] {'loss': 1.33, 'grad_norm': 0.004599152496239697, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:21<07:37, 3.91s/it] 78%|███████▊ | 404/520 [25:25<07:34, 3.92s/it] {'loss': 1.236, 'grad_norm': 0.005569606612698294, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:25<07:34, 3.92s/it] 78%|███████▊ | 405/520 [25:29<07:32, 3.94s/it] {'loss': 1.4683, 'grad_norm': 0.004136499226553395, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:29<07:32, 3.94s/it] 78%|███████▊ | 406/520 [25:33<07:28, 3.94s/it] {'loss': 1.3985, 'grad_norm': 0.005173943632198156, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:33<07:28, 3.94s/it] 78%|███████▊ | 407/520 [25:37<07:25, 3.94s/it] {'loss': 1.4249, 'grad_norm': 0.004602141214359753, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:37<07:25, 3.94s/it] 78%|███████▊ | 408/520 [25:40<07:16, 3.89s/it] {'loss': 1.298, 'grad_norm': 0.004348392805032514, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:40<07:16, 3.89s/it] 79%|███████▊ | 409/520 [25:44<07:09, 3.87s/it] {'loss': 1.4409, 'grad_norm': 0.004941043040198595, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:44<07:09, 3.87s/it] 79%|███████▉ | 410/520 [25:48<07:03, 3.85s/it] {'loss': 1.1421, 'grad_norm': 0.00391766025586517, 'learning_rate': 
0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:48<07:03, 3.85s/it] 79%|███████▉ | 411/520 [25:52<06:58, 3.84s/it] {'loss': 1.4126, 'grad_norm': 0.005092576273488251, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:52<06:58, 3.84s/it] 79%|███████▉ | 412/520 [25:56<06:53, 3.83s/it] {'loss': 1.3319, 'grad_norm': 0.004296057045876916, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:56<06:53, 3.83s/it] 79%|███████▉ | 413/520 [25:59<06:49, 3.83s/it] {'loss': 1.5283, 'grad_norm': 0.0046878997767997605, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [25:59<06:49, 3.83s/it] 80%|███████▉ | 414/520 [26:03<06:45, 3.83s/it] {'loss': 1.2704, 'grad_norm': 0.0038409753270092855, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:03<06:45, 3.83s/it] 80%|███████▉ | 415/520 [26:07<06:41, 3.83s/it] {'loss': 1.2957, 'grad_norm': 0.00397873561253403, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:07<06:41, 3.83s/it] 80%|████████ | 416/520 [26:11<06:37, 3.83s/it] {'loss': 1.2017, 'grad_norm': 0.004803683558437647, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:11<06:37, 3.83s/it] 80%|████████ | 417/520 [26:15<06:34, 3.83s/it] {'loss': 1.3846, 'grad_norm': 0.0047578934027689, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:15<06:34, 3.83s/it] 80%|████████ | 418/520 [26:19<06:30, 3.83s/it] {'loss': 1.3709, 'grad_norm': 0.004108682512384858, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:19<06:30, 3.83s/it] 81%|████████ | 419/520 [26:22<06:26, 3.83s/it] {'loss': 1.3544, 'grad_norm': 0.00440242532658779, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:22<06:26, 3.83s/it] 81%|████████ | 420/520 [26:26<06:22, 3.82s/it] {'loss': 1.2223, 'grad_norm': 0.0043345324167359724, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:26<06:22, 3.82s/it] 81%|████████ | 421/520 [26:30<06:18, 3.82s/it] {'loss': 1.1563, 'grad_norm': 0.004740591325757018, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:30<06:18, 3.82s/it] 81%|████████ | 422/520 [26:34<06:14, 3.82s/it] {'loss': 1.2926, 'grad_norm': 0.00482419819836395, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:34<06:14, 3.82s/it] 81%|████████▏ | 423/520 [26:37<06:05, 3.77s/it] {'loss': 1.2859, 'grad_norm': 0.005109004880932523, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:37<06:05, 3.77s/it] 82%|████████▏ | 424/520 [26:41<05:58, 3.74s/it] {'loss': 1.6159, 'grad_norm': 0.005008876749994769, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:41<05:58, 3.74s/it] 82%|████████▏ | 425/520 [26:45<05:52, 3.71s/it] {'loss': 1.2853, 'grad_norm': 0.004309605217142493, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:45<05:52, 3.71s/it] 82%|████████▏ | 426/520 [26:48<05:46, 3.69s/it] {'loss': 1.3164, 'grad_norm': 0.005560969009842522, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:48<05:46, 3.69s/it] 82%|████████▏ | 427/520 [26:52<05:41, 3.67s/it] {'loss': 1.2137, 'grad_norm': 0.004202297036850799, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:52<05:41, 3.67s/it] 82%|████████▏ | 428/520 
[26:56<05:37, 3.66s/it] {'loss': 1.1881, 'grad_norm': 0.004350764316657077, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:56<05:37, 3.66s/it] 82%|████████▎ | 429/520 [27:00<05:39, 3.73s/it] {'loss': 1.2981, 'grad_norm': 0.003932212461612679, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:00<05:39, 3.73s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:03<05:39, 3.77s/it] {'loss': 1.2855, 'grad_norm': 0.003834872623777323, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:03<05:39, 3.77s/it] 83%|████████▎ | 431/520 [27:07<05:39, 3.81s/it] {'loss': 1.4865, 'grad_norm': 0.0044137608625633195, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:07<05:39, 3.81s/it] 83%|████████▎ | 432/520 [27:11<05:36, 3.83s/it] {'loss': 1.2038, 'grad_norm': 0.004853112312343605, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:11<05:36, 3.83s/it] 83%|████████▎ | 433/520 [27:15<05:34, 3.84s/it] {'loss': 1.3535, 'grad_norm': 0.004776571210025366, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:15<05:34, 3.84s/it] 83%|████████▎ | 434/520 [27:19<05:32, 3.87s/it] {'loss': 1.0727, 'grad_norm': 0.004243057215310229, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:19<05:32, 3.87s/it] 84%|████████▎ | 435/520 [27:23<05:27, 3.86s/it] {'loss': 1.3861, 'grad_norm': 0.004545225604229524, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:23<05:27, 3.86s/it] 84%|████████▍ | 436/520 [27:27<05:24, 3.87s/it] {'loss': 1.1576, 'grad_norm': 0.004504840062433448, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:27<05:24, 3.87s/it] 84%|████████▍ | 437/520 [27:30<05:15, 3.80s/it] {'loss': 1.4133, 'grad_norm': 0.004255128123554129, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:30<05:15, 3.80s/it] 84%|████████▍ | 438/520 [27:34<05:08, 3.76s/it] {'loss': 1.1884, 'grad_norm': 0.00395466778993456, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:34<05:08, 3.76s/it] 84%|████████▍ | 439/520 [27:38<05:01, 3.73s/it] {'loss': 1.4313, 'grad_norm': 0.003662915339598637, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:38<05:01, 3.73s/it] 85%|████████▍ | 440/520 [27:41<04:56, 3.71s/it] {'loss': 1.2613, 'grad_norm': 0.004028419522609384, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:41<04:56, 3.71s/it] 85%|████████▍ | 441/520 [27:45<04:51, 3.69s/it] {'loss': 1.4659, 'grad_norm': 0.004038465575630366, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:45<04:51, 3.69s/it] 85%|████████▌ | 442/520 [27:49<04:48, 3.69s/it] {'loss': 1.3027, 'grad_norm': 0.005022235560774127, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:49<04:48, 3.69s/it] 85%|████████▌ | 443/520 [27:52<04:43, 3.68s/it] {'loss': 1.3335, 'grad_norm': 0.004354060226341623, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:52<04:43, 3.68s/it] 85%|████████▌ | 444/520 [27:56<04:39, 3.68s/it] {'loss': 1.301, 'grad_norm': 0.0037512271518367916, 'learning_rate': 
0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:56<04:39, 3.68s/it] 86%|████████▌ | 445/520 [28:00<04:35, 3.68s/it] {'loss': 1.211, 'grad_norm': 0.005179272811172209, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:00<04:35, 3.68s/it] 86%|████████▌ | 446/520 [28:03<04:32, 3.68s/it] {'loss': 1.5566, 'grad_norm': 0.0041791437442879045, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:03<04:32, 3.68s/it] 86%|████████▌ | 447/520 [28:07<04:29, 3.69s/it] {'loss': 1.3191, 'grad_norm': 0.004356198201646996, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:07<04:29, 3.69s/it] 86%|████████▌ | 448/520 [28:11<04:24, 3.68s/it] {'loss': 1.2826, 'grad_norm': 0.004248512995054681, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:11<04:24, 3.68s/it] 86%|████████▋ | 449/520 [28:14<04:20, 3.67s/it] {'loss': 1.5134, 'grad_norm': 0.004679953844553764, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:14<04:20, 3.67s/it] 87%|████████▋ | 450/520 [28:18<04:16, 3.66s/it] {'loss': 1.3433, 'grad_norm': 0.004337851671350215, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:18<04:16, 3.66s/it] 87%|████████▋ | 451/520 [28:22<04:12, 3.66s/it] {'loss': 1.3345, 'grad_norm': 0.004954979717375706, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:22<04:12, 3.66s/it] 87%|████████▋ | 452/520 [28:25<04:08, 3.66s/it] {'loss': 1.5435, 'grad_norm': 0.0042582345783633265, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:25<04:08, 3.66s/it] 87%|████████▋ | 453/520 [28:29<04:05, 3.67s/it] {'loss': 1.4915, 'grad_norm': 0.004537044195102866, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:29<04:05, 3.67s/it] 87%|████████▋ | 454/520 [28:33<04:01, 3.67s/it] {'loss': 1.2357, 'grad_norm': 0.004468460782694782, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:33<04:01, 3.67s/it] 88%|████████▊ | 455/520 [28:36<03:57, 3.66s/it] {'loss': 1.3735, 'grad_norm': 0.004146915613435353, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:36<03:57, 3.66s/it] 88%|████████▊ | 456/520 [28:40<03:54, 3.66s/it] {'loss': 1.2809, 'grad_norm': 0.004331197555380655, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:40<03:54, 3.66s/it] 88%|████████▊ | 457/520 [28:44<03:51, 3.67s/it] {'loss': 1.598, 'grad_norm': 0.00407649572811508, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:44<03:51, 3.67s/it] 88%|████████▊ | 458/520 [28:47<03:48, 3.68s/it] {'loss': 1.4423, 'grad_norm': 0.0044306358105767885, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:47<03:48, 3.68s/it] 88%|████████▊ | 459/520 [28:51<03:44, 3.69s/it] {'loss': 1.3578, 'grad_norm': 0.004291119280424658, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:51<03:44, 3.69s/it] 88%|████████▊ | 460/520 [28:55<03:40, 3.68s/it] {'loss': 1.2293, 'grad_norm': 0.00404030198556583, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:55<03:40, 3.68s/it] 89%|████████▊ | 461/520 [28:58<03:37, 3.69s/it] {'loss': 1.6459, 'grad_norm': 0.0038487974064286305, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:58<03:37, 3.69s/it] 
89%|████████▉ | 462/520 [29:02<03:34, 3.70s/it] {'loss': 1.5961, 'grad_norm': 0.004204412473331877, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:02<03:34, 3.70s/it] 89%|████████▉ | 463/520 [29:06<03:30, 3.69s/it] {'loss': 1.1886, 'grad_norm': 0.005035370940039482, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:06<03:30, 3.69s/it] 89%|████████▉ | 464/520 [29:10<03:26, 3.69s/it] {'loss': 1.3631, 'grad_norm': 0.0045855662184092864, 'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:10<03:26, 3.69s/it] 89%|████████▉ | 465/520 [29:13<03:22, 3.69s/it] {'loss': 1.4654, 'grad_norm': 0.004879143159745803, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:13<03:22, 3.69s/it] 90%|████████▉ | 466/520 [29:17<03:19, 3.70s/it] {'loss': 1.3292, 'grad_norm': 0.003924314340338417, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:17<03:19, 3.70s/it] 90%|████████▉ | 467/520 [29:21<03:16, 3.70s/it] {'loss': 1.4517, 'grad_norm': 0.004410759548322474, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:21<03:16, 3.70s/it] 90%|█████████ | 468/520 [29:24<03:12, 3.70s/it] {'loss': 1.313, 'grad_norm': 0.004820159025459572, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:24<03:12, 3.70s/it] 90%|█████████ | 469/520 [29:28<03:08, 3.69s/it] {'loss': 1.3719, 'grad_norm': 0.004497198453476226, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:28<03:08, 3.69s/it] 90%|█████████ | 470/520 [29:32<03:04, 3.68s/it] {'loss': 1.233, 'grad_norm': 0.0037858234890557844, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:32<03:04, 3.68s/it] 91%|█████████ | 471/520 [29:35<02:59, 3.67s/it] {'loss': 1.2537, 'grad_norm': 0.004451150981392472, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:35<02:59, 3.67s/it] 91%|█████████ | 472/520 [29:39<02:56, 3.67s/it] {'loss': 1.224, 'grad_norm': 0.0042178329493546395, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:39<02:56, 3.67s/it] 91%|█████████ | 473/520 [29:43<02:52, 3.66s/it] {'loss': 1.2778, 'grad_norm': 0.00425460094774315, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:43<02:52, 3.66s/it] 91%|█████████ | 474/520 [29:46<02:48, 3.67s/it] {'loss': 1.4821, 'grad_norm': 0.004254206474132037, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:46<02:48, 3.67s/it] 91%|█████████▏| 475/520 [29:50<02:44, 3.66s/it] {'loss': 1.4069, 'grad_norm': 0.004056413097080938, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:50<02:44, 3.66s/it] 92%|█████████▏| 476/520 [29:54<02:41, 3.66s/it] {'loss': 1.2825, 'grad_norm': 0.004424970532863707, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:54<02:41, 3.66s/it] 92%|█████████▏| 477/520 [29:57<02:38, 3.68s/it] {'loss': 1.2641, 'grad_norm': 0.0053781961799114, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:57<02:38, 3.68s/it] 92%|█████████▏| 478/520 [30:01<02:34, 3.67s/it] {'loss': 1.223, 'grad_norm': 0.004181312769751865, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:01<02:34, 3.67s/it] 92%|█████████▏| 479/520 [30:05<02:30, 3.67s/it] {'loss': 1.4923, 'grad_norm': 
0.004499171908674559, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:05<02:30, 3.67s/it] 92%|█████████▏| 480/520 [30:08<02:26, 3.67s/it] {'loss': 1.4769, 'grad_norm': 0.004643322660611298, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:08<02:26, 3.67s/it] 92%|█████████▎| 481/520 [30:12<02:23, 3.67s/it] {'loss': 1.5269, 'grad_norm': 0.004337066232239676, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:12<02:23, 3.67s/it] 93%|█████████▎| 482/520 [30:16<02:19, 3.66s/it] {'loss': 1.5132, 'grad_norm': 0.004436579337085861, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:16<02:19, 3.66s/it] 93%|█████████▎| 483/520 [30:19<02:15, 3.67s/it] {'loss': 1.319, 'grad_norm': 0.004515756891028613, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:19<02:15, 3.67s/it] 93%|█████████▎| 484/520 [30:23<02:12, 3.68s/it] {'loss': 1.3122, 'grad_norm': 0.004603928398015242, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:23<02:12, 3.68s/it] 93%|█████████▎| 485/520 [30:27<02:08, 3.68s/it] {'loss': 1.248, 'grad_norm': 0.003983946779850344, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:27<02:08, 3.68s/it] 93%|█████████▎| 486/520 [30:30<02:04, 3.67s/it] {'loss': 1.3753, 'grad_norm': 0.004318016987289112, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:30<02:04, 3.67s/it] 94%|█████████▎| 487/520 [30:34<02:01, 3.67s/it] {'loss': 1.2318, 'grad_norm': 0.004058426085790688, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:34<02:01, 3.67s/it] 94%|█████████▍| 488/520 [30:38<01:57, 3.68s/it] {'loss': 1.1613, 'grad_norm': 0.004278452340950686, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:38<01:57, 3.68s/it] 94%|█████████▍| 489/520 [30:41<01:53, 3.67s/it] {'loss': 1.4993, 'grad_norm': 0.0040037240389608, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:41<01:53, 3.67s/it] 94%|█████████▍| 490/520 [30:45<01:49, 3.66s/it] {'loss': 1.303, 'grad_norm': 0.004654588251380239, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:45<01:49, 3.66s/it] 94%|█████████▍| 491/520 [30:49<01:46, 3.66s/it] {'loss': 1.2463, 'grad_norm': 0.004166838611848611, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:49<01:46, 3.66s/it] 95%|█████████▍| 492/520 [30:52<01:42, 3.65s/it] {'loss': 1.3886, 'grad_norm': 0.005289466872173628, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:52<01:42, 3.65s/it] 95%|█████████▍| 493/520 [30:56<01:38, 3.66s/it] {'loss': 1.5992, 'grad_norm': 0.004481697882821448, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:56<01:38, 3.66s/it] 95%|█████████▌| 494/520 [31:00<01:35, 3.68s/it] {'loss': 1.3178, 'grad_norm': 0.003927943006732784, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:00<01:35, 3.68s/it] 95%|█████████▌| 495/520 [31:03<01:31, 3.66s/it] {'loss': 1.2586, 'grad_norm': 0.004290680273245325, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:03<01:31, 3.66s/it] 95%|█████████▌| 496/520 [31:07<01:28, 3.68s/it] {'loss': 1.1858, 'grad_norm': 0.0041490482203160916, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} 
+ 95%|█████████▌| 496/520 [31:07<01:28, 3.68s/it] 96%|█████████▌| 497/520 [31:11<01:24, 3.66s/it] {'loss': 1.4172, 'grad_norm': 0.0038911791519593212, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:11<01:24, 3.66s/it] 96%|█████████▌| 498/520 [31:14<01:21, 3.70s/it] {'loss': 1.2666, 'grad_norm': 0.004404832527034764, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:14<01:21, 3.70s/it] 96%|█████████▌| 499/520 [31:18<01:19, 3.77s/it] {'loss': 1.5643, 'grad_norm': 0.004341628697860923, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:18<01:19, 3.77s/it] 96%|█████████▌| 500/520 [31:22<01:15, 3.79s/it] {'loss': 1.3921, 'grad_norm': 0.005122235261869913, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:22<01:15, 3.79s/it] 96%|█████████▋| 501/520 [31:26<01:11, 3.74s/it] {'loss': 1.5145, 'grad_norm': 0.0067175687085834785, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:26<01:11, 3.74s/it] 97%|█████████▋| 502/520 [31:30<01:07, 3.73s/it] {'loss': 1.3085, 'grad_norm': 0.004243718163776314, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:30<01:07, 3.73s/it] 97%|█████████▋| 503/520 [31:33<01:03, 3.71s/it] {'loss': 1.4485, 'grad_norm': 0.004425766208487635, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:33<01:03, 3.71s/it] 97%|█████████▋| 504/520 [31:37<00:59, 3.70s/it] {'loss': 1.3067, 'grad_norm': 0.004717411625710913, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:37<00:59, 3.70s/it] 97%|█████████▋| 505/520 [31:41<00:55, 3.68s/it] {'loss': 1.3603, 'grad_norm': 0.004133334426514829, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:41<00:55, 3.68s/it] 97%|█████████▋| 506/520 [31:44<00:51, 3.67s/it] {'loss': 1.2546, 'grad_norm': 0.004726521625914951, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:44<00:51, 3.67s/it] 98%|█████████▊| 507/520 [31:48<00:47, 3.67s/it] {'loss': 1.6082, 'grad_norm': 0.004161585809146886, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:48<00:47, 3.67s/it] 98%|█████████▊| 508/520 [31:52<00:43, 3.67s/it] {'loss': 1.3792, 'grad_norm': 0.004221338318702687, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:52<00:43, 3.67s/it] 98%|█████████▊| 509/520 [31:55<00:40, 3.66s/it] {'loss': 1.3526, 'grad_norm': 0.004108964161608344, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:55<00:40, 3.66s/it] 98%|█████████▊| 510/520 [31:59<00:36, 3.66s/it] {'loss': 1.3091, 'grad_norm': 0.004083519081348961, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [31:59<00:36, 3.66s/it] 98%|█████████▊| 511/520 [32:03<00:33, 3.72s/it] {'loss': 1.2746, 'grad_norm': 0.0038815545645649427, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:03<00:33, 3.72s/it] 98%|█████████▊| 512/520 [32:07<00:30, 3.78s/it] {'loss': 1.1533, 'grad_norm': 0.0040221699628076885, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:07<00:30, 3.78s/it] 99%|█████████▊| 513/520 [32:11<00:26, 3.82s/it] {'loss': 1.3647, 'grad_norm': 0.004531232898496116, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:11<00:26, 3.82s/it] 
99%|█████████▉| 514/520 [32:14<00:23, 3.85s/it] {'loss': 1.3448, 'grad_norm': 0.0038057702599752228, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:14<00:23, 3.85s/it] 99%|█████████▉| 515/520 [32:18<00:19, 3.86s/it] {'loss': 1.4031, 'grad_norm': 0.004770749756011515, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:18<00:19, 3.86s/it] 99%|█████████▉| 516/520 [32:22<00:15, 3.87s/it] {'loss': 1.2586, 'grad_norm': 0.004086390619715339, 'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:22<00:15, 3.87s/it] 99%|█████████▉| 517/520 [32:26<00:11, 3.86s/it] {'loss': 1.53, 'grad_norm': 0.004606627293832098, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:26<00:11, 3.86s/it] 100%|█████████▉| 518/520 [32:30<00:07, 3.86s/it] {'loss': 1.31, 'grad_norm': 0.004225975249157881, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:30<00:07, 3.86s/it] 100%|█████████▉| 519/520 [32:34<00:03, 3.85s/it] {'loss': 1.4774, 'grad_norm': 0.004150748348809536, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:34<00:03, 3.85s/it] 100%|██████████| 520/520 [32:39<00:00, 4.14s/it] {'loss': 1.61, 'grad_norm': 0.004703244842331866, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:39<00:00, 4.14s/it] {'train_runtime': 1959.0832, 'train_samples_per_second': 33.959, 'train_steps_per_second': 0.265, 'train_loss': 1.6287698092368932, 'epoch': 1.0} + 100%|██████████| 520/520 [32:39<00:00, 4.14s/it] 100%|██████████| 520/520 [32:39<00:00, 3.77s/it] +[2025-10-10 08:40:53,250] [INFO] [launch.py:348:main] Process 684531 exits successfully. +[2025-10-10 08:40:53,251] [INFO] [launch.py:348:main] Process 684534 exits successfully. +[2025-10-10 08:40:54,252] [INFO] [launch.py:348:main] Process 684532 exits successfully. +[2025-10-10 08:40:54,253] [INFO] [launch.py:348:main] Process 684533 exits successfully. +[2025-10-10 08:40:54,253] [INFO] [launch.py:348:main] Process 684537 exits successfully. +[2025-10-10 08:40:54,254] [INFO] [launch.py:348:main] Process 684536 exits successfully. +[2025-10-10 08:40:54,254] [INFO] [launch.py:348:main] Process 684535 exits successfully. +[2025-10-10 08:40:58,259] [INFO] [launch.py:348:main] Process 684530 exits successfully. 
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_0.9_2e-1_connector-1.0_0.9_2e-1_ablation_20251010_080556.log +Timestamp: 2025-10-10 08:41:00 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_060651.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_060651.log new file mode 100644 index 0000000000000000000000000000000000000000..4983b8093a0cedd04cbe64fa5987c6551bd3482a --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_060651.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_060651.log +Timestamp: 2025-10-10 06:06:51 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:06:53,813] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:06:56,525] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:06:56,526] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. 
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 1.1 --temperature_mlp_text 1.1 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 1.1 --temperature_mlp_vision 1.1 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 1.1 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:06:59,131] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:07:00,202] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:07:00,202] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:07:00,203] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:07:00,203] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:07:00,203] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:07:00,203] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:07:00,203] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:07:00,205] [INFO] [launch.py:253:main] process 562815 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03',
'--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:07:00,207] [INFO] [launch.py:253:main] process 562816 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', 
'--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:07:00,209] [INFO] [launch.py:253:main] process 562817 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:07:00,211] [INFO] [launch.py:253:main] process 562818 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', 
'--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:07:00,213] [INFO] [launch.py:253:main] process 562819 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', 
'--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:07:00,215] [INFO] [launch.py:253:main] process 562820 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', 
'--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:07:00,217] [INFO] [launch.py:253:main] process 562821 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:07:00,219] [INFO] [launch.py:253:main] process 562822 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', 
'--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 06:07:06,748] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:07:06,855] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:07:07,118] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:07:07,118] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:07:07,126] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:07:07,144] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:07:07,147] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:07:07,158] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:07:07,166] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:07:07,252] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:07:07,528] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:07:07,528] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:07:07,529] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:07:07,529] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 06:07:07,551] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:07:07,553] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:07:07,568] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.1, 'temperature_mlp': 1.1, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.1, 'mask_type': 'soft', 'backward_type': 'normal'}}
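The nested dict above collects the per-module mask settings ('llm', 'vision_tower', 'connector') that the flat --*_text / --*_vision / --*_connector flags in the spawn command feed in. As a rough illustration of that flat-to-nested mapping (a hedged sketch only; `build_mask_config` and its argument layout are hypothetical, not the repository's actual API):

def build_mask_config(args: dict) -> dict:
    # Hypothetical helper: fold the flat CLI flags into the per-module dict
    # printed in the log. Values in comments are the ones from this run.
    return {
        "llm": {
            "subnet_mode": args["subnet_mode_text"],                    # 'both'
            "subnet_type": args["subnet_type_text"],                    # 'None'
            "mask_type": args["mask_type_text"],                        # 'soft'
            "temperature_attn": float(args["temperature_attn_text"]),   # 1.1
            "temperature_mlp": float(args["temperature_mlp_text"]),     # 1.1
            "backward_type": args["backward_type_text"],                # 'normal'
            "masked_layers": args["masked_layers_text"],                # 'all'
        },
        "vision_tower": {
            "model_name_or_path": args["vision_tower"],  # 'google/siglip-so400m-patch14-384'
        },
        "connector": {
            "subnet_type": args["subnet_type_connector"],         # 'global'
            "mask_type": args["mask_type_connector"],             # 'soft'
            "temperature": float(args["temperature_connector"]),  # 1.1
            "backward_type": args["backward_type_connector"],     # 'normal'
        },
    }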
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.1,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.1,
+    "temperature_mlp": 1.1,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
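The config above requests soft masks (mask_type "soft") at temperature 1.1 for both the LLM and the connector, with backward_type "normal", presumably meaning gradients flow through the gate unmodified rather than via a straight-through estimator. The actual masking code is not part of this log; what follows is a minimal PyTorch sketch of one common reading of a temperature-scaled soft mask, with class and attribute names that are illustrative only:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    """Linear layer gated elementwise by sigmoid(scores / temperature)."""

    def __init__(self, in_features: int, out_features: int,
                 temperature: float = 1.1, init_mean: float = 1.0):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5)
        # Learnable mask scores; init_mean=1.0 mirrors the --init_mean_* flags above.
        self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))
        self.temperature = temperature

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mask = torch.sigmoid(self.scores / self.temperature)  # soft gate in (0, 1)
        return F.linear(x, self.weight * mask)

Under this reading, a higher temperature flattens the sigmoid, so with scores initialized at 1.0 the gates start near sigmoid(1.0 / 1.1) ≈ 0.71 and move gradually as the mask parameters are tuned.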
+ywang29-vrdb-test2-worker-0:562815:562815 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562815:562815 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562815:562815 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:562815:562815 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:562815:562815 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:562815:562815 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test2-worker-0:562816:562816 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:562816:562816 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562816:562816 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562816:562816 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:562816:562816 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:562816:562816 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:562822:562822 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:562822:562822 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562822:562822 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562822:562822 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:562822:562822 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:562822:562822 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:562817:562817 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:562817:562817 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562817:562817 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562817:562817 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:562817:562817 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:562817:562817 [2] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test2-worker-0:562818:562818 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:562818:562818 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562818:562818 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562821:562821 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:562821:562821 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562821:562821 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562818:562818 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:562818:562818 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:562818:562818 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:562821:562821 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:562821:562821 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:562821:562821 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:562820:562820 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:562820:562820 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562820:562820 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562820:562820 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:562820:562820 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:562820:562820 [5] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:562819:562819 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:562819:562819 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562819:562819 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562819:562819 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:562819:562819 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:562819:562819 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO ncclCommInitRank comm 0x5565263f8f60 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc4da55c95153cc10 - Init START +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO ncclCommInitRank comm 0x55d36e7131f0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc4da55c95153cc10 - Init START +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO ncclCommInitRank comm 0x55c599266260 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc4da55c95153cc10 - Init START +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO ncclCommInitRank comm 0x555c13cdb2a0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc4da55c95153cc10 - Init START +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO ncclCommInitRank comm 0x564de331fe80 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc4da55c95153cc10 - Init START +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO ncclCommInitRank comm 0x560f575a55e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc4da55c95153cc10 - Init START +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO ncclCommInitRank comm 0x562f29a2ce70 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc4da55c95153cc10 - Init START +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO ncclCommInitRank comm 0x55b2aa3b4cd0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc4da55c95153cc10 - Init START +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 
+ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO comm 0x55b2aa3b4cd0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO comm 0x562f29a2ce70 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO comm 0x564de331fe80 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO comm 0x560f575a55e0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO comm 0x55c599266260 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO comm 0x555c13cdb2a0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO comm 0x5565263f8f60 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO comm 0x55d36e7131f0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 
3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 
[14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 02/0 : 0[0] -> 
1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 
+ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:562820:564482 [5] NCCL INFO ncclCommInitRank comm 0x555c13cdb2a0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc4da55c95153cc10 - Init COMPLETE +ywang29-vrdb-test2-worker-0:562819:564483 [4] NCCL INFO ncclCommInitRank comm 0x5565263f8f60 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc4da55c95153cc10 - Init COMPLETE +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:562821:564478 [6] NCCL INFO ncclCommInitRank comm 0x560f575a55e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc4da55c95153cc10 - Init COMPLETE +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:562818:564476 [3] NCCL INFO ncclCommInitRank comm 0x55d36e7131f0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc4da55c95153cc10 - Init COMPLETE +ywang29-vrdb-test2-worker-0:562822:564475 [7] NCCL INFO ncclCommInitRank comm 0x55c599266260 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc4da55c95153cc10 - Init COMPLETE +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:562817:564477 [2] NCCL INFO ncclCommInitRank comm 0x55b2aa3b4cd0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc4da55c95153cc10 - Init COMPLETE +ywang29-vrdb-test2-worker-0:562816:564474 [1] NCCL INFO ncclCommInitRank comm 0x564de331fe80 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc4da55c95153cc10 - Init COMPLETE +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:562815:564473 [0] NCCL INFO ncclCommInitRank comm 0x562f29a2ce70 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc4da55c95153cc10 - Init COMPLETE +[2025-10-10 06:07:45,995] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 
'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 
'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 
'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 
'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 
'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 
'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 06:07:47,797] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
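
The `.scores` tensors flagged as newly initialized above are, going by the flags in the launch command (--mask_type_text soft, --init_mean_text 1.0, the --temperature_attn_*/--temperature_mlp_* values) and the SupermaskLinearSparsity_SoftForward_Normal modules in the dump below, learnable mask parameters for soft subnetwork tuning. Here is a minimal sketch of what such a soft-forward masked linear layer could look like, assuming the usual supermask recipe; the class name SoftMaskedLinear and its body are illustrative guesses, not the repository's actual implementation:

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal."""
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # One learnable score per weight entry; these are the `.scores` tensors
        # that the checkpoint-loading warning reports as newly initialized.
        self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))
        self.temperature = temperature

    def forward(self, x):
        # Soft forward: squash scores into (0, 1) and scale the base weights,
        # keeping the mask differentiable so it can be tuned by gradient descent.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

Under this reading, the "Pre-training init ... Mean=1.000000" lines further down simply confirm that every scores tensor starts at the configured init mean of 1.0, and the temperature ablation these logs sweep controls how close the initial mask is to identity: sigmoid(1.0/0.3) ≈ 0.97 barely attenuates the weights, whereas sigmoid(1.0/1.1) ≈ 0.71 shrinks them substantially from the first step.
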
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,),
eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000 +Pre-training init connector._connector.0.scores: Mean=1.000005 +Pre-training init connector._connector.2.scores: Mean=0.999970 +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module> + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/root/dataset/text_files/llava_v1_5_mix665k.json' +[2025-10-10 06:07:50,274] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 562815 +[2025-10-10 06:07:50,528] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 562816 +[2025-10-10 06:07:50,942] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 562817 +[2025-10-10 06:07:50,943] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 562818 +[2025-10-10 06:07:50,944] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 562819 +[2025-10-10 06:07:50,945] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 562820 +[2025-10-10 06:07:51,037] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 562821 +[2025-10-10 06:07:51,039] [INFO] [launch.py:316:sigkill_handler] Killing
subprocess 562822 +[2025-10-10 06:07:51,040] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_060651.log +Timestamp: 2025-10-10 06:07:52 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log new file mode 100644 index 0000000000000000000000000000000000000000..7a99b24f160eff4f472a006fe9382072da4125c7 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: 
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log
new file mode 100644
index 0000000000000000000000000000000000000000..7a99b24f160eff4f472a006fe9382072da4125c7
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log
+Timestamp: 2025-10-10 08:41:00
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[2025-10-10 08:41:03,379] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:06,359] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-10 08:41:06,361] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 1.1 --temperature_mlp_text 1.1 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 1.1 --temperature_mlp_vision 1.1 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 1.1 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[2025-10-10 08:41:08,942] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:09,973] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 08:41:09,973] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 08:41:09,974] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 08:41:09,974] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 08:41:09,974] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 08:41:09,974] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 08:41:09,974] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 08:41:09,976] [INFO] [launch.py:253:main] process 706997 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.1', '--temperature_mlp_text', '1.1', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.1', '--temperature_mlp_vision', '1.1', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.1', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 08:41:09,978] [INFO] [launch.py:253:main] process 706998 spawned with command: [identical to process 706997 except '--local_rank=1']
+[2025-10-10 08:41:09,980] [INFO] [launch.py:253:main] process 706999 spawned with command: [identical except '--local_rank=2']
+[2025-10-10 08:41:09,982] [INFO] [launch.py:253:main] process 707000 spawned with command: [identical except '--local_rank=3']
+[2025-10-10 08:41:09,984] [INFO] [launch.py:253:main] process 707001 spawned with command: [identical except '--local_rank=4']
+[2025-10-10 08:41:09,986] [INFO] [launch.py:253:main] process 707002 spawned with command: [identical except '--local_rank=5']
+[2025-10-10 08:41:09,988] [INFO] [launch.py:253:main] process 707003 spawned with command: [identical except '--local_rank=6']
+[2025-10-10 08:41:09,990] [INFO] [launch.py:253:main] process 707004 spawned with command: [identical except '--local_rank=7']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[the same FutureWarning is printed once per rank]
+[2025-10-10 08:41:16,720] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:16,984] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:16,985] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:17,089] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:17,094] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:17,094] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:17,107] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:17,109] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 08:41:17,131] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 08:41:17,385] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 08:41:17,385] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 08:41:17,500] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 08:41:17,502] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 08:41:17,503] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 08:41:17,510] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 08:41:17,516] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 08:41:17,516] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+  warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.1, 'temperature_mlp': 1.1, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.1, 'mask_type': 'soft', 'backward_type': 'normal'}}
+[the "Apply masks" line and the resume_download FutureWarning are repeated once per rank]
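The dict above is the masking configuration this run actually applies: soft masks with temperature 1.1 on both the LLM and the connector, with learnable scores initialized around 1.0 (the `Pre-training init ... Mean=1.000005` lines). The repository's implementation is not shown in the log; as a rough sketch of what a soft, temperature-scaled weight mask of this kind typically looks like (all names here are illustrative, not TinyLLaVA code):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    """Sketch of assumed mask_type='soft' semantics: one sigmoid-squashed score per weight."""

    def __init__(self, base: nn.Linear, temperature: float = 1.1, init_mean: float = 1.0):
        super().__init__()
        self.base = base
        self.temperature = temperature
        # Scores start near init_mean=1.0, consistent with the logged means of ~1.000005.
        self.scores = nn.Parameter(torch.full_like(base.weight, init_mean))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Lower temperature -> sharper, more binary mask; 1.1 keeps it soft.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.base.weight * mask, self.base.bias)
```

Under this reading, the temperature sweep encoded in the experiment names (0.3 vs. 1.1) controls how sharply the mask saturates.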
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.1,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.1,
+    "temperature_mlp": 1.1,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
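For reference, `connector_type: mlp2x_gelu` together with `vision_hidden_size: 1152` and `hidden_size: 896` describes a two-layer GELU MLP projector, and the indices in the earlier `connector._connector.0.scores` / `connector._connector.2.scores` lines line up with positions 0 and 2 of such a stack. A minimal sketch, assuming the standard LLaVA-style projector (my reconstruction, not code from the log):

```python
import torch.nn as nn

# Sketch of the mlp2x_gelu connector implied by the config: SigLIP features
# (1152-d) projected into the Qwen2.5-0.5B embedding space (896-d).
# Modules 0 and 2 are the two Linear layers whose mask scores are logged at init.
connector = nn.Sequential(
    nn.Linear(1152, 896),  # _connector.0
    nn.GELU(),             # _connector.1
    nn.Linear(896, 896),   # _connector.2
)
```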
+ywang29-vrdb-test2-worker-0:706997:706997 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706997:706997 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:706997:706997 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:706997:706997 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:706997:706997 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:706997:706997 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:706999:706999 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:706999:706999 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706999:706999 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:706999:706999 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:706999:706999 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:706999:706999 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Using network Socket
+[the Flash Attention 2.0 warning, the "Special tokens" notice, and the TypedStorage UserWarning above are repeated by each of the remaining ranks]
+ywang29-vrdb-test2-worker-0:707002:707002 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:707002:707002 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707002:707002 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707000:707000 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:707000:707000 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707000:707000 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707002:707002 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:707002:707002 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:707002:707002 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:707000:707000 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:707000:707000 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:707000:707000 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:707001:707001 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:707001:707001 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707001:707001 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707001:707001 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:707001:707001 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:707001:707001 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:707004:707004 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:707004:707004 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707004:707004 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707004:707004 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:707004:707004 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:707004:707004 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:706998:706998 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:706998:706998 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706998:706998 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:706998:706998 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:706998:706998 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:706998:706998 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:707003:707003 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:707003:707003 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707003:707003 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707003:707003 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:707003:707003 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:707003:707003 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO ncclCommInitRank comm 0x55e5ffdee2c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xdf45111bc3f7c3d - Init START
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO ncclCommInitRank comm 0x565577a18180 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xdf45111bc3f7c3d - Init START
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO ncclCommInitRank comm 0x55f19b27a670 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xdf45111bc3f7c3d - Init START
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO ncclCommInitRank comm 0x564f03808510 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xdf45111bc3f7c3d - Init START
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO ncclCommInitRank comm 0x5573eb686eb0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xdf45111bc3f7c3d - Init START
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO ncclCommInitRank comm 0x55cbdefcd930 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xdf45111bc3f7c3d - Init START
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO ncclCommInitRank comm 0x55b087c55d50 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xdf45111bc3f7c3d - Init START
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO ncclCommInitRank comm 0x558765e60e10 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xdf45111bc3f7c3d - Init START
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO NVLS multicast support is not available on dev 5
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO NVLS multicast support is not available on dev 6
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO NVLS multicast support is not available on dev 1
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO NVLS multicast support is not available on dev 2
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO NVLS multicast support is not available on dev 7
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO NVLS multicast support is not available on dev 4
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO NVLS multicast support is not available on dev 3
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO comm 0x565577a18180 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO comm 0x55e5ffdee2c0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO comm 0x55f19b27a670 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO comm 0x5573eb686eb0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO comm 0x55b087c55d50 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO comm 0x558765e60e10 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO comm 0x564f03808510 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO comm 0x55cbdefcd930 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO 24 coll channels, 24 collnet 
channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:707003:708619 [6] NCCL INFO ncclCommInitRank comm 0x55e5ffdee2c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xdf45111bc3f7c3d - Init COMPLETE +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:707001:708616 [4] NCCL INFO ncclCommInitRank comm 0x55b087c55d50 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xdf45111bc3f7c3d - Init COMPLETE +ywang29-vrdb-test2-worker-0:707004:708617 [7] NCCL INFO ncclCommInitRank comm 0x565577a18180 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xdf45111bc3f7c3d - Init COMPLETE +ywang29-vrdb-test2-worker-0:707002:708614 [5] NCCL INFO ncclCommInitRank comm 0x55f19b27a670 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xdf45111bc3f7c3d - Init COMPLETE +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:707000:708615 [3] NCCL INFO ncclCommInitRank comm 0x558765e60e10 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xdf45111bc3f7c3d - Init COMPLETE +ywang29-vrdb-test2-worker-0:706998:708618 [1] NCCL INFO ncclCommInitRank comm 0x5573eb686eb0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xdf45111bc3f7c3d - Init COMPLETE +ywang29-vrdb-test2-worker-0:706997:708579 [0] NCCL INFO ncclCommInitRank comm 0x55cbdefcd930 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xdf45111bc3f7c3d - Init COMPLETE +ywang29-vrdb-test2-worker-0:706999:708580 [2] NCCL INFO ncclCommInitRank comm 0x564f03808510 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xdf45111bc3f7c3d - Init COMPLETE +[2025-10-10 08:42:03,294] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 08:42:05,048] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
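The repeated warnings above (one per rank) are expected rather than a sign of a corrupted checkpoint: every rank loads the same pretrained language model, and that checkpoint contains no `.scores` tensors because those are extra per-weight mask parameters that the masked ("supermask") linear layers register alongside the regular weights, so Transformers initializes them fresh on each rank. Below is a minimal sketch of such a layer; the class name SoftMaskedLinear, the sigmoid gating, and the temperature default are illustrative assumptions (the log itself only shows SupermaskLinearSparsity_SoftForward_Normal modules and score means of 1.0), not the repository's actual implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Linear layer whose weight is modulated by a learnable soft mask.

    The extra `scores` parameter is what appears as '<layer>.scores' in the
    state_dict, and hence in the "newly initialized" warnings above.
    """

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # One learnable score per weight entry, initialized to a constant
        # (1.0 here, matching the Mean=1.000000 lines reported further below).
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature  # assumed knob controlling mask sharpness

    def forward(self, x):
        # "Soft forward": no hard thresholding, so the temperature-scaled
        # sigmoid keeps the mask differentiable and the scores trainable.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

layer = SoftMaskedLinear(896, 896)   # same shape as the q_proj modules printed below
print(layer.scores.mean().item())    # 1.0 immediately after init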
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
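In the printout above, every attention and MLP projection of the Qwen2 language model and both connector layers are SupermaskLinearSparsity_SoftForward_Normal modules, while the frozen SigLIP vision tower keeps plain Linear layers, consistent with masking applied to the LLM and connector only. The "Pre-training init" lines that follow report the mean of each score tensor before tuning starts. A sketch of how such a report could be produced, reusing the hypothetical SoftMaskedLinear from the previous sketch (module names here are illustrative):

import torch.nn as nn

# Toy stand-in for one decoder block's masked projections.
block = nn.ModuleDict({
    "q_proj": SoftMaskedLinear(896, 896),
    "gate_proj": SoftMaskedLinear(896, 4864),
})
for name, param in block.named_parameters():
    if name.endswith(".scores"):
        # Mirrors the log format below; every mean is 1.000000 at init.
        print(f"Pre-training init {name}: Mean={param.data.mean().item():.6f}")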
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000
+[identical "Pre-training init ... scores: Mean=1.000000" lines for layers 1-23 (q/k/v/o_proj, gate/up/down_proj) trimmed]
+Pre-training init connector._connector.0.scores: Mean=1.000005
+Pre-training init connector._connector.2.scores: Mean=0.999970
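
The "Pre-training init" dump is a sanity check that every score tensor starts at its configured mean before the first update. A helper along these lines (hypothetical reconstruction; the repo's own logging code is not shown in this log) would produce exactly this output:

def log_score_means(model):
    # Print the mean of every supermask score tensor, as in the log above.
    for name, param in model.named_parameters():
        if name.endswith(".scores"):
            print(f"Pre-training init {name}: Mean={param.mean().item():.6f}")

Note that the two connector means deviate slightly from 1.0, suggesting the connector's scores receive a small random perturbation rather than the constant init used for the LLM layers.
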
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+[the line above is printed once per rank; 8 identical copies trimmed to 1]
+2025-10-10 08:42:23,329 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-10 08:42:23,335 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+[identical per-layer score parameter counts for layers 1-23 trimmed]
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%|          | 0/520 [00:00<?, ?it/s]
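
The listed counts are just the weight shapes of the masked projections (one score per weight entry), so the reported total can be cross-checked with a few lines of plain Python (illustrative arithmetic, not repo code):

# Cross-check "Total Trainable Parameters: 359661568" from the shapes above.
q = o = 896 * 896                   # q_proj/o_proj scores: 802816 each
k = v = 896 * 128                   # k_proj/v_proj scores: 114688 each
mlp = 3 * 896 * 4864                # gate/up/down_proj scores: 4358144 each
per_layer = q + k + v + o + mlp     # 14909440 scores per decoder layer
connector = 1152 * 896 + 896 * 896  # 1032192 + 802816
total = 24 * per_layer + connector
print(total)                        # -> 359661568, matching the log

The remaining 924,095,168 of the 1,283,756,736 total parameters are the frozen base weights of the LLM, the SigLIP tower, and the connector.
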
+ywang29-vrdb-test2-worker-0:706997:713628 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test2-worker-0:707003:713629 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test2-worker-0:707000:713630 [3] NCCL INFO Connected all rings
+[NCCL initialization chatter trimmed: rank 0 enumerates 24 identical rings (channels 00-23, order 0 1 2 3 4 5 6 7); each of the 8 ranks logs its tree (e.g. "Trees [0] 1/-1/-1->0->-1 ..." on rank 0) and "P2P Chunksize set to 524288"; every adjacent rank pair then connects all 24 channels in both directions via P2P/CUMEM/read and reports "Connected all rings"]
+ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:713629 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:713629 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:707003:713629 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:706997:713628 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:706997:713628 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:706997:713628 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707000:713630 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:706999:713631 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707000:713630 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707000:713630 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:706999:713631 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:706999:713631 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707004:713635 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707004:713635 [7] NCCL INFO 
threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707004:713635 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707001:713634 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707001:713634 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707001:713634 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707003:713629 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707003:713629 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707003:713629 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707002:713633 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:707002:713633 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:707002:713633 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:707002:713633 [5] NCCL INFO ncclCommInitRank comm 0x7f4f1c06b4d0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xde05e00706a74d08 - Init COMPLETE +ywang29-vrdb-test2-worker-0:707004:713635 [7] NCCL INFO ncclCommInitRank comm 0x7f828c06a480 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xde05e00706a74d08 - Init COMPLETE +ywang29-vrdb-test2-worker-0:707003:713629 [6] NCCL INFO ncclCommInitRank comm 0x7f8e8c06b720 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xde05e00706a74d08 - Init COMPLETE +ywang29-vrdb-test2-worker-0:706998:713632 [1] NCCL INFO ncclCommInitRank comm 0x7f1cd006ab50 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xde05e00706a74d08 - Init COMPLETE +ywang29-vrdb-test2-worker-0:707000:713630 [3] NCCL INFO ncclCommInitRank comm 0x7f8c7406b180 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xde05e00706a74d08 - Init COMPLETE +ywang29-vrdb-test2-worker-0:706999:713631 [2] NCCL INFO ncclCommInitRank comm 0x7f9c2006a7e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xde05e00706a74d08 - Init COMPLETE +ywang29-vrdb-test2-worker-0:706997:713628 [0] NCCL INFO ncclCommInitRank comm 0x7f7d3806ba30 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xde05e00706a74d08 - Init COMPLETE +ywang29-vrdb-test2-worker-0:707001:713634 [4] NCCL INFO ncclCommInitRank comm 0x7f0d0406acf0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xde05e00706a74d08 - Init COMPLETE + 0%| | 1/520 [00:14<2:05:22, 14.49s/it] {'loss': 6.2312, 'grad_norm': 0.43451585758631356, 'learning_rate': 0.0125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:05:22, 14.49s/it] 0%| | 2/520 [00:18<1:09:55, 8.10s/it] {'loss': 5.5571, 'grad_norm': 0.41420230493673715, 'learning_rate': 0.025, 'epoch': 0.0} + 0%| | 2/520 [00:18<1:09:55, 8.10s/it] 1%| | 3/520 [00:21<52:17, 6.07s/it] {'loss': 3.6117, 'grad_norm': 0.22325687625735802, 'learning_rate': 0.037500000000000006, 'epoch': 0.01} + 1%| | 3/520 [00:21<52:17, 6.07s/it] 1%| | 4/520 [00:25<43:54, 5.11s/it] {'loss': 2.8256, 'grad_norm': 0.18725962782532976, 'learning_rate': 0.05, 'epoch': 0.01} + 1%| | 4/520 [00:25<43:54, 5.11s/it] 1%| | 5/520 [00:29<39:25, 4.59s/it] {'loss': 2.4855, 'grad_norm': 0.09789816642075339, 'learning_rate': 0.0625, 'epoch': 0.01} + 1%| | 5/520 [00:29<39:25, 4.59s/it] 1%| | 6/520 [00:32<36:57, 
+ 1%|▏ | 7/520 [00:36<35:18, 4.13s/it] {'loss': 1.875, 'grad_norm': 0.030889783085123235, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:40<35:44, 4.19s/it] {'loss': 1.9741, 'grad_norm': 0.0367343699994114, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:45<35:42, 4.19s/it] {'loss': 1.9513, 'grad_norm': 0.01993452022049882, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:48<34:35, 4.07s/it] {'loss': 1.7288, 'grad_norm': 0.0578207803742886, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:52<34:15, 4.04s/it] {'loss': 2.0325, 'grad_norm': 0.10399331074688674, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:56<34:10, 4.04s/it] {'loss': 2.1812, 'grad_norm': 0.06532494659395828, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-10 08:43:29,449] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [01:01<35:38, 4.22s/it] {'loss': 1.8266, 'grad_norm': 0.02463574618800627, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:05<34:36, 4.10s/it] {'loss': 1.9091, 'grad_norm': 0.027700177751906176, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:09<33:57, 4.03s/it] {'loss': 2.2437, 'grad_norm': 0.0494423945016077, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:13<33:19, 3.97s/it] {'loss': 2.0201, 'grad_norm': 0.03570034295259166, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:16<32:52, 3.92s/it] {'loss': 1.9563, 'grad_norm': 0.023672321179620745, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:20<32:35, 3.90s/it] {'loss': 1.7227, 'grad_norm': 0.015053291134017488, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 4%|▎ | 19/520 [01:24<32:18, 3.87s/it] {'loss': 2.3878, 'grad_norm': 0.052882458179497, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:28<32:06, 3.85s/it] {'loss': 1.763, 'grad_norm': 0.017978637971409406, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:32<31:59, 3.85s/it] {'loss': 2.295, 'grad_norm': 0.030823569431872488, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:36<31:53, 3.84s/it] {'loss': 1.912, 'grad_norm': 0.01487744988304167, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:39<31:44, 3.83s/it] {'loss': 1.843, 'grad_norm': 0.026333655819808944, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
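The stage3.py [WARNING] above indicates high memory pressure: the PyTorch allocator flushed its cache during an optimizer step, and DeepSpeed suggests calling get_accelerator().empty_cache() in the training loop so that all ranks flush at the same point. A minimal sketch of that suggestion, assuming a hand-rolled loop (this run actually trains through the TinyLLaVA/HF Trainer, and `flush_every` is an arbitrary illustrative cadence, not a value from this log):

```python
# Sketch only: the empty_cache() mitigation named by the stage3.py warning,
# applied in a generic DeepSpeed training loop. `engine` is assumed to be a
# deepspeed.initialize()-returned engine whose forward returns the loss.
from deepspeed.accelerator import get_accelerator

def train_steps(engine, dataloader, flush_every=50):
    for step, batch in enumerate(dataloader):
        loss = engine(batch)      # forward pass on the DeepSpeed engine
        engine.backward(loss)     # ZeRO-3-aware backward
        engine.step()             # optimizer step + LR schedule
        if step % flush_every == 0:
            # Flush the CUDA allocator cache at the same step on every
            # rank, as the warning advises, so ranks stay in lockstep.
            get_accelerator().empty_cache()
```

Per the warning text itself, the point of a fixed cadence is that every rank flushes its cache at the same time rather than whenever an individual rank hits memory pressure.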
+ 5%|▍ | 24/520 [01:43<31:38, 3.83s/it] {'loss': 2.05, 'grad_norm': 0.024065059294651962, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:47<31:33, 3.83s/it] {'loss': 1.8533, 'grad_norm': 0.018052828546262792, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:51<31:32, 3.83s/it] {'loss': 1.867, 'grad_norm': 0.027552967725588164, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:55<31:25, 3.82s/it] {'loss': 1.7019, 'grad_norm': 0.012833957414334766, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:58<31:24, 3.83s/it] {'loss': 1.68, 'grad_norm': 0.01015281530344414, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 6%|▌ | 29/520 [02:02<31:27, 3.84s/it] {'loss': 1.7053, 'grad_norm': 0.012202869549417619, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:06<31:20, 3.84s/it] {'loss': 2.2386, 'grad_norm': 0.015793735570763096, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:10<31:16, 3.84s/it] {'loss': 1.7029, 'grad_norm': 0.008585784844026743, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:14<31:16, 3.84s/it] {'loss': 2.4402, 'grad_norm': 0.03222500201064464, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:18<31:10, 3.84s/it] {'loss': 1.7416, 'grad_norm': 0.015256533990172759, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+ 7%|▋ | 34/520 [02:22<31:05, 3.84s/it] {'loss': 1.6793, 'grad_norm': 0.010551563963539559, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:25<30:56, 3.83s/it] {'loss': 1.7128, 'grad_norm': 0.014727408615590316, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:29<30:37, 3.80s/it] {'loss': 1.8314, 'grad_norm': 0.010309710652746122, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:33<30:12, 3.75s/it] {'loss': 2.1589, 'grad_norm': 0.01164913601021398, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:36<29:56, 3.73s/it] {'loss': 1.9051, 'grad_norm': 0.008955436853329413, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:40<29:41, 3.70s/it] {'loss': 1.6989, 'grad_norm': 0.0075029564237633125, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+ 8%|▊ | 40/520 [02:44<29:27, 3.68s/it] {'loss': 1.7325, 'grad_norm': 0.0070820893551878, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+ 8%|▊ | 41/520 [02:47<29:28, 3.69s/it] {'loss': 1.695, 'grad_norm': 0.007769545329008874, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+ 8%|▊ | 42/520 [02:51<29:20, 3.68s/it] {'loss': 1.7514, 'grad_norm': 0.015464524731218265, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+ 8%|▊ | 43/520 [02:55<29:17, 3.68s/it] {'loss': 1.9126, 'grad_norm': 0.009952441397186432, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+ 8%|▊ | 44/520 [02:58<29:17, 3.69s/it] {'loss': 2.0727, 'grad_norm': 0.011623341053529784, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+ 9%|▊ | 45/520 [03:02<29:06, 3.68s/it] {'loss': 1.7186, 'grad_norm': 0.012164979990011747, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:06<28:54, 3.66s/it] {'loss': 2.1282, 'grad_norm': 0.010596099142801838, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:09<28:53, 3.67s/it] {'loss': 1.7157, 'grad_norm': 0.01701559890668399, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:13<28:49, 3.66s/it] {'loss': 1.6802, 'grad_norm': 0.00871515380698716, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:17<28:42, 3.66s/it] {'loss': 1.7026, 'grad_norm': 0.015230002455687409, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+ 10%|▉ | 50/520 [03:20<28:39, 3.66s/it] {'loss': 1.684, 'grad_norm': 0.00700300051063068, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:24<28:34, 3.66s/it] {'loss': 1.5857, 'grad_norm': 0.011609926858205643, 'learning_rate': 0.19762960071199334, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:28<28:36, 3.67s/it] {'loss': 1.7587, 'grad_norm': 0.007926940865021149, 'learning_rate': 0.19749279121818236, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:31<28:30, 3.66s/it] {'loss': 1.7567, 'grad_norm': 0.010512813627580478, 'learning_rate': 0.19735219372611235, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:35<28:24, 3.66s/it] {'loss': 1.6065, 'grad_norm': 0.006548906310137266, 'learning_rate': 0.19720781369857746, 'epoch': 0.1}
+ 11%|█ | 55/520 [03:39<28:19, 3.65s/it] {'loss': 1.6203, 'grad_norm': 0.008937174234408821, 'learning_rate': 0.1970596567453391, 'epoch': 0.11}
+ 11%|█ | 56/520 [03:42<28:15, 3.65s/it] {'loss': 1.7778, 'grad_norm': 0.006855899181248532, 'learning_rate': 0.1969077286229078, 'epoch': 0.11}
+ 11%|█ | 57/520 [03:46<28:12, 3.65s/it] {'loss': 1.6053, 'grad_norm': 0.009286400602611308, 'learning_rate': 0.19675203523431964, 'epoch': 0.11}
+ 11%|█ | 58/520 [03:50<28:10, 3.66s/it] {'loss': 1.7751, 'grad_norm': 0.006521210852453567, 'learning_rate': 0.19659258262890683, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [03:53<28:05, 3.66s/it] {'loss': 1.8003, 'grad_norm': 0.010749240468089515, 'learning_rate': 0.19642937700206278, 'epoch': 0.11}
+ 12%|█▏ | 60/520 [03:57<28:07, 3.67s/it] {'loss': 1.6919, 'grad_norm': 0.009167403028465014, 'learning_rate': 0.19626242469500121, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [04:01<27:58, 3.66s/it] {'loss': 1.9287, 'grad_norm': 0.010273907371306186, 'learning_rate': 0.19609173219450998, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:04<27:50, 3.65s/it] {'loss': 1.6657, 'grad_norm': 0.008161271811790145, 'learning_rate': 0.19591730613269878, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:08<27:47, 3.65s/it] {'loss': 1.6431, 'grad_norm': 0.006346527229758144, 'learning_rate': 0.19573915328674182, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:12<27:45, 3.65s/it] {'loss': 1.6675, 'grad_norm': 0.006888905376021457, 'learning_rate': 0.1955572805786141, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:15<27:43, 3.66s/it] {'loss': 1.6758, 'grad_norm': 0.007358763834180543, 'learning_rate': 0.1953716950748227, 'epoch': 0.12}
+ 13%|█▎ | 66/520 [04:19<27:59, 3.70s/it] {'loss': 1.651, 'grad_norm': 0.0062979150103887015, 'learning_rate': 0.19518240398613226, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:23<27:46, 3.68s/it] {'loss': 1.4878, 'grad_norm': 0.007822691987128537, 'learning_rate': 0.1949894146672846, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:26<27:41, 3.68s/it] {'loss': 1.5425, 'grad_norm': 0.005836108579791597, 'learning_rate': 0.1947927346167132, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:30<27:36, 3.67s/it] {'loss': 1.5159, 'grad_norm': 0.0074510076907231895, 'learning_rate': 0.1945923714762516, 'epoch': 0.13}
+ 13%|█▎ | 70/520 [04:34<27:30, 3.67s/it] {'loss': 1.5871, 'grad_norm': 0.0069197078415364575, 'learning_rate': 0.19438833303083677, 'epoch': 0.13}
+ 14%|█▎ | 71/520 [04:37<27:35, 3.69s/it] {'loss': 1.4976, 'grad_norm': 0.005712015602934051, 'learning_rate': 0.19418062720820636, 'epoch': 0.14}
+ 14%|█▍ | 72/520 [04:41<27:22, 3.67s/it] {'loss': 1.6717, 'grad_norm': 0.006368341640629186, 'learning_rate': 0.19396926207859086, 'epoch': 0.14}
+ 14%|█▍ | 73/520 [04:45<27:19, 3.67s/it] {'loss': 1.487, 'grad_norm': 0.005941528957424244, 'learning_rate': 0.19375424585439993, 'epoch': 0.14}
+ 14%|█▍ | 74/520 [04:48<27:15, 3.67s/it] {'loss': 1.631, 'grad_norm': 0.007677533230982975, 'learning_rate': 0.1935355868899034, 'epoch': 0.14}
+ 14%|█▍ | 75/520 [04:52<27:11, 3.67s/it] {'loss': 1.4818, 'grad_norm': 0.00582967804319192, 'learning_rate': 0.19331329368090666, 'epoch': 0.14}
+ 15%|█▍ | 76/520 [04:56<27:04, 3.66s/it] {'loss': 1.9632, 'grad_norm': 0.009482491813799626, 'learning_rate': 0.19308737486442043, 'epoch': 0.15}
+ 15%|█▍ | 77/520 [04:59<27:05, 3.67s/it] {'loss': 1.4382, 'grad_norm': 0.006495443279816893, 'learning_rate': 0.19285783921832536, 'epoch': 0.15}
+ 15%|█▌ | 78/520 [05:03<27:00, 3.67s/it] {'loss': 1.5683, 'grad_norm': 0.006290414067870005, 'learning_rate': 0.19262469566103088, 'epoch': 0.15}
+ 15%|█▌ | 79/520 [05:07<27:10, 3.70s/it] {'loss': 1.5391, 'grad_norm': 0.005344299592288582, 'learning_rate': 0.19238795325112867, 'epoch': 0.15}
+ 15%|█▌ | 80/520 [05:10<27:15, 3.72s/it] {'loss': 1.9826, 'grad_norm': 0.010168959395716708, 'learning_rate': 0.19214762118704076, 'epoch': 0.15}
+ 16%|█▌ | 81/520 [05:14<27:10, 3.71s/it] {'loss': 1.7086, 'grad_norm': 0.007845189693553235, 'learning_rate': 0.19190370880666208, 'epoch': 0.16}
+ 16%|█▌ | 82/520 [05:18<26:57, 3.69s/it] {'loss': 1.6153, 'grad_norm': 0.0061029170932008605, 'learning_rate': 0.19165622558699763, 'epoch': 0.16}
+ 16%|█▌ | 83/520 [05:22<26:56, 3.70s/it] {'loss': 1.6522, 'grad_norm': 0.0062603085656339555, 'learning_rate': 0.19140518114379435, 'epoch': 0.16}
+ 16%|█▌ | 84/520 [05:25<26:51, 3.70s/it] {'loss': 1.667, 'grad_norm': 0.007196272693238519, 'learning_rate': 0.19115058523116735, 'epoch': 0.16}
+ 16%|█▋ | 85/520 [05:29<26:53, 3.71s/it] {'loss': 1.6419, 'grad_norm': 0.007720206920207104, 'learning_rate': 0.1908924477412211, 'epoch': 0.16}
+ 17%|█▋ | 86/520 [05:33<26:47, 3.70s/it] {'loss': 1.7093, 'grad_norm': 0.006407825155568524, 'learning_rate': 0.19063077870366502, 'epoch': 0.17}
+ 17%|█▋ | 87/520 [05:36<26:45, 3.71s/it] {'loss': 1.9184, 'grad_norm': 0.009344235184201431, 'learning_rate': 0.1903655882854237, 'epoch': 0.17}
+ 17%|█▋ | 88/520 [05:40<26:54, 3.74s/it] {'loss': 1.9583, 'grad_norm': 0.008994282520308238, 'learning_rate': 0.19009688679024192, 'epoch': 0.17}
+ 17%|█▋ | 89/520 [05:44<27:08, 3.78s/it] {'loss': 1.6152, 'grad_norm': 0.006035869521848264, 'learning_rate': 0.18982468465828442, 'epoch': 0.17}
+ 17%|█▋ | 90/520 [05:48<27:16, 3.81s/it] {'loss': 1.5342, 'grad_norm': 0.006542056240782494, 'learning_rate': 0.1895489924657301, 'epoch': 0.17}
+ 18%|█▊ | 91/520 [05:52<27:11, 3.80s/it] {'loss': 1.6339, 'grad_norm': 0.0056430720052886895, 'learning_rate': 0.18926982092436118, 'epoch': 0.17}
+ 18%|█▊ | 92/520 [05:55<26:50, 3.76s/it] {'loss': 1.5582, 'grad_norm': 0.006159023073922652, 'learning_rate': 0.18898718088114688, 'epoch': 0.18}
+ 18%|█▊ | 93/520 [05:59<26:40, 3.75s/it] {'loss': 1.5589, 'grad_norm': 0.0065839441194015655, 'learning_rate': 0.18870108331782218, 'epoch': 0.18}
+ 18%|█▊ | 94/520 [06:03<26:27, 3.73s/it] {'loss': 1.6881, 'grad_norm': 0.006572698133612315, 'learning_rate': 0.18841153935046098, 'epoch': 0.18}
+ 18%|█▊ | 95/520 [06:06<26:13, 3.70s/it] {'loss': 1.5331, 'grad_norm': 0.006275003805343696, 'learning_rate': 0.18811856022904425, 'epoch': 0.18}
+ 18%|█▊ | 96/520 [06:10<26:02, 3.69s/it] {'loss': 1.537, 'grad_norm': 0.006854299177327094, 'learning_rate': 0.18782215733702287, 'epoch': 0.18}
+ 19%|█▊ | 97/520 [06:14<25:59, 3.69s/it] {'loss': 1.5177, 'grad_norm': 0.007336315132591468, 'learning_rate': 0.18752234219087538, 'epoch': 0.19}
+ 19%|█▉ | 98/520 [06:17<25:53, 3.68s/it] {'loss': 1.5067, 'grad_norm': 0.005099845028938043, 'learning_rate': 0.18721912643966054, 'epoch': 0.19}
+ 19%|█▉ | 99/520 [06:21<25:49, 3.68s/it] {'loss': 1.5509, 'grad_norm': 0.005743012937607586, 'learning_rate': 0.18691252186456464, 'epoch': 0.19}
+ 19%|█▉ | 100/520 [06:25<25:39, 3.66s/it] {'loss': 1.7825, 'grad_norm': 0.008377982895837776, 'learning_rate': 0.1866025403784439, 'epoch': 0.19}
+ 19%|█▉ | 101/520 [06:28<25:35, 3.66s/it] {'loss': 1.5282, 'grad_norm': 0.006096382870651379, 'learning_rate': 0.18628919402536132, 'epoch': 0.19}
+ 20%|█▉ | 102/520 [06:32<25:29, 3.66s/it] {'loss': 1.5492, 'grad_norm': 0.0069212373334217635, 'learning_rate': 0.18597249498011903, 'epoch': 0.2}
+ 20%|█▉ | 103/520 [06:36<25:19, 3.64s/it] {'loss': 1.4651, 'grad_norm': 0.005203969339481186, 'learning_rate': 0.18565245554778517, 'epoch': 0.2}
+ 20%|██ | 104/520 [06:39<25:16, 3.64s/it] {'loss': 1.5508, 'grad_norm': 0.006051592878339385, 'learning_rate': 0.18532908816321558, 'epoch': 0.2}
+ 20%|██ | 105/520 [06:43<25:12, 3.64s/it] {'loss': 1.5426, 'grad_norm': 0.005161456062627608, 'learning_rate': 0.18500240539057092, 'epoch': 0.2}
+ 20%|██ | 106/520 [06:47<25:09, 3.65s/it] {'loss': 1.7354, 'grad_norm': 0.006191933956302688, 'learning_rate': 0.18467241992282843, 'epoch': 0.2}
+ 21%|██ | 107/520 [06:50<25:09, 3.65s/it] {'loss': 1.7134, 'grad_norm': 0.007141767581681747, 'learning_rate': 0.18433914458128858, 'epoch': 0.21}
+ 21%|██ | 108/520 [06:54<25:08, 3.66s/it] {'loss': 1.4962, 'grad_norm': 0.006039975101164527, 'learning_rate': 0.18400259231507718, 'epoch': 0.21}
+ 21%|██ | 109/520 [06:58<25:13, 3.68s/it] {'loss': 1.6995, 'grad_norm': 0.005480185271903771, 'learning_rate': 0.18366277620064198, 'epoch': 0.21}
+ 21%|██ | 110/520 [07:01<25:10, 3.68s/it] {'loss': 1.6879, 'grad_norm': 0.005650824756441506, 'learning_rate': 0.1833197094412449, 'epoch': 0.21}
+ 21%|██▏ | 111/520 [07:05<25:07, 3.69s/it] {'loss': 1.7207, 'grad_norm': 0.0062878717921883275, 'learning_rate': 0.18297340536644877, 'epoch': 0.21}
+ 22%|██▏ | 112/520 [07:09<25:09, 3.70s/it] {'loss': 1.5652, 'grad_norm': 0.0064480663967062864, 'learning_rate': 0.1826238774315995, 'epoch': 0.22}
+ 22%|██▏ | 113/520 [07:12<25:04, 3.70s/it] {'loss': 1.4242, 'grad_norm': 0.005476811943837112, 'learning_rate': 0.18227113921730334, 'epoch': 0.22}
+ 22%|██▏ | 114/520 [07:16<24:54, 3.68s/it] {'loss': 1.5471, 'grad_norm': 0.005512120823829246, 'learning_rate': 0.1819152044288992, 'epoch': 0.22}
+ 22%|██▏ | 115/520 [07:20<24:45, 3.67s/it] {'loss': 1.6816, 'grad_norm': 0.005422538365685953, 'learning_rate': 0.18155608689592603, 'epoch': 0.22}
+ 22%|██▏ | 116/520 [07:23<24:37, 3.66s/it] {'loss': 1.6541, 'grad_norm': 0.005345904668524058, 'learning_rate': 0.18119380057158568, 'epoch': 0.22}
+ 22%|██▎ | 117/520 [07:27<24:50, 3.70s/it] {'loss': 1.6481, 'grad_norm': 0.0059787057811320835, 'learning_rate': 0.18082835953220056, 'epoch': 0.23}
+ 23%|██▎ | 118/520 [07:31<25:03, 3.74s/it] {'loss': 1.5025, 'grad_norm': 0.005127047680400339, 'learning_rate': 0.18045977797666685, 'epoch': 0.23}
+ 23%|██▎ | 119/520 [07:35<25:07, 3.76s/it] {'loss': 1.4516, 'grad_norm': 0.006464983764622941, 'learning_rate': 0.1800880702259028, 'epoch': 0.23}
+ 23%|██▎ | 120/520 [07:39<25:14, 3.79s/it] {'loss': 1.4696, 'grad_norm': 0.0069438525924139394, 'learning_rate': 0.17971325072229227, 'epoch': 0.23}
+ 23%|██▎ | 121/520 [07:42<25:15, 3.80s/it] {'loss': 1.547, 'grad_norm': 0.007028861320948459, 'learning_rate': 0.17933533402912352, 'epoch': 0.23}
+ 23%|██▎ | 122/520 [07:46<25:20, 3.82s/it] {'loss': 1.4097, 'grad_norm': 0.005851519803246503, 'learning_rate': 0.17895433483002354, 'epoch': 0.23}
+ 24%|██▎ | 123/520 [07:50<25:22, 3.84s/it] {'loss': 1.7861, 'grad_norm': 0.007299944273839835, 'learning_rate': 0.17857026792838737, 'epoch': 0.24}
+ 24%|██▍ | 124/520 [07:54<25:21, 3.84s/it] {'loss': 1.5282, 'grad_norm': 0.005904408326393631, 'learning_rate': 0.178183148246803, 'epoch': 0.24}
+ 24%|██▍ | 125/520 [07:58<25:09, 3.82s/it] {'loss': 1.5041, 'grad_norm': 0.005604614307930636, 'learning_rate': 0.1777929908264715, 'epoch': 0.24}
+ 24%|██▍ | 126/520 [08:02<26:03, 3.97s/it] {'loss': 1.6413, 'grad_norm': 0.005122363042613067, 'learning_rate': 0.17739981082662276, 'epoch': 0.24}
+ 24%|██▍ | 127/520 [08:06<25:23, 3.88s/it] {'loss': 1.4788, 'grad_norm': 0.0070597152126542434, 'learning_rate': 0.1770036235239263, 'epoch': 0.24}
+ 25%|██▍ | 128/520 [08:10<24:54, 3.81s/it] {'loss': 1.5531, 'grad_norm': 0.005919183271975226, 'learning_rate': 0.1766044443118978, 'epoch': 0.25}
+ 25%|██▍ | 129/520 [08:13<24:30, 3.76s/it] {'loss': 1.4539, 'grad_norm': 0.004703577958413705, 'learning_rate': 0.17620228870030108, 'epoch': 0.25}
+ 25%|██▌ | 130/520 [08:17<24:09, 3.72s/it] {'loss': 1.5337, 'grad_norm': 0.004691538127393417, 'learning_rate': 0.1757971723145453, 'epoch': 0.25}
+ 25%|██▌ | 131/520 [08:20<23:57, 3.70s/it] {'loss': 1.6246, 'grad_norm': 0.005602368718548311, 'learning_rate': 0.175389110895078, 'epoch': 0.25}
+ 25%|██▌ | 132/520 [08:24<23:44, 3.67s/it] {'loss': 1.5742, 'grad_norm': 0.006120635433004826, 'learning_rate': 0.17497812029677343, 'epoch': 0.25}
+ 26%|██▌ | 133/520 [08:28<23:36, 3.66s/it] {'loss': 1.4607, 'grad_norm': 0.005911424656722392, 'learning_rate': 0.17456421648831655, 'epoch': 0.26}
+ 26%|██▌ | 134/520 [08:31<23:29, 3.65s/it] {'loss': 1.5501, 'grad_norm': 0.004973813554476704, 'learning_rate': 0.17414741555158267, 'epoch': 0.26}
+ 26%|██▌ | 135/520 [08:35<23:23, 3.64s/it] {'loss': 1.645, 'grad_norm': 0.005269472179642294, 'learning_rate': 0.1737277336810124, 'epoch': 0.26}
+ 26%|██▌ | 136/520 [08:39<23:22, 3.65s/it] {'loss': 1.5413, 'grad_norm': 0.005500956061080576, 'learning_rate': 0.17330518718298263, 'epoch': 0.26}
+ 26%|██▋ | 137/520 [08:42<23:15, 3.64s/it] {'loss': 1.4684, 'grad_norm': 0.006799005627778055, 'learning_rate': 0.17287979247517285, 'epoch': 0.26}
+ 27%|██▋ | 138/520 [08:46<23:12, 3.64s/it] {'loss': 1.4587, 'grad_norm': 0.004627375933839497, 'learning_rate': 0.17245156608592727, 'epoch': 0.27}
+ 27%|██▋ | 139/520 [08:50<23:11, 3.65s/it] {'loss': 1.5288, 'grad_norm': 0.006073960708229719, 'learning_rate': 0.17202052465361267, 'epoch': 0.27}
+ 27%|██▋ | 140/520 [08:53<23:12, 3.67s/it] {'loss': 1.686, 'grad_norm': 0.006644331258905144, 'learning_rate': 0.17158668492597184, 'epoch': 0.27}
+ 27%|██▋ | 141/520 [08:57<23:06, 3.66s/it] {'loss': 1.5971, 'grad_norm': 0.004909913238358421, 'learning_rate': 0.17115006375947303, 'epoch': 0.27}
+ 27%|██▋ | 142/520 [09:01<23:03, 3.66s/it] {'loss': 1.7154, 'grad_norm': 0.0071429987742251495, 'learning_rate': 0.17071067811865476, 'epoch': 0.27}
+ 28%|██▊ | 143/520 [09:04<22:59, 3.66s/it] {'loss': 1.5169, 'grad_norm': 0.006186955045575832, 'learning_rate': 0.17026854507546693, 'epoch': 0.28}
+ 28%|██▊ | 144/520 [09:08<22:54, 3.66s/it] {'loss': 1.4461, 'grad_norm': 0.007779162927484316, 'learning_rate': 0.1698236818086073, 'epoch': 0.28}
+ 28%|██▊ | 145/520 [09:11<22:51, 3.66s/it] {'loss': 1.3868, 'grad_norm': 0.004957381050058968, 'learning_rate': 0.16937610560285418, 'epoch': 0.28}
+ 28%|██▊ | 146/520 [09:15<22:50, 3.66s/it] {'loss': 1.75, 'grad_norm': 0.005673156438156683, 'learning_rate': 0.1689258338483947, 'epoch': 0.28}
+ 28%|██▊ | 147/520 [09:19<22:49, 3.67s/it] {'loss': 1.4297, 'grad_norm': 0.006380892991456665, 'learning_rate': 0.16847288404014937, 'epoch': 0.28}
+ 28%|██▊ | 148/520 [09:23<22:42, 3.66s/it] {'loss': 1.4694, 'grad_norm': 0.005134843632120446, 'learning_rate': 0.16801727377709194, 'epoch': 0.28}
+ 29%|██▊ | 149/520 [09:26<22:52, 3.70s/it] {'loss': 1.4232, 'grad_norm': 0.005327953654100003, 'learning_rate': 0.16755902076156604, 'epoch': 0.29}
+ 29%|██▉ | 150/520 [09:30<22:44, 3.69s/it] {'loss': 1.664, 'grad_norm': 0.004938364567664099, 'learning_rate': 0.16709814279859703, 'epoch': 0.29}
+ 29%|██▉ | 151/520 [09:34<22:35, 3.67s/it] {'loss': 1.4493, 'grad_norm': 0.004897630814827271, 'learning_rate': 0.1666346577952004, 'epoch': 0.29}
+ 29%|██▉ | 152/520 [09:37<22:29, 3.67s/it] {'loss': 1.4172, 'grad_norm': 0.004989421252133179, 'learning_rate': 0.16616858375968596, 'epoch': 0.29}
+ 29%|██▉ | 153/520 [09:41<22:41, 3.71s/it] {'loss': 1.4539, 'grad_norm': 0.0047257806858402585, 'learning_rate': 0.16569993880095807, 'epoch': 0.29}
+ 30%|██▉ | 154/520 [09:45<22:39, 3.71s/it] {'loss': 1.5481, 'grad_norm': 0.0047122617182058105, 'learning_rate': 0.16522874112781213, 'epoch': 0.3}
+ 30%|██▉ | 155/520 [09:48<22:32, 3.71s/it] {'loss': 1.4501, 'grad_norm': 0.00540886778155036, 'learning_rate': 0.16475500904822704, 'epoch': 0.3}
+ 30%|███ | 156/520 [09:52<22:25, 3.70s/it] {'loss': 1.4806, 'grad_norm': 0.005114925566430599, 'learning_rate': 0.16427876096865393, 'epoch': 0.3}
+ 30%|███ | 157/520 [09:56<22:17, 3.69s/it] {'loss': 1.7889, 'grad_norm': 0.0067753622576632215, 'learning_rate': 0.16380001539330089, 'epoch': 0.3}
+ 30%|███ | 158/520 [09:59<22:13, 3.68s/it] {'loss': 1.45, 'grad_norm': 0.0058388140746038625, 'learning_rate': 0.163318790923414, 'epoch': 0.3}
+ 31%|███ | 159/520 [10:03<22:25, 3.73s/it] {'loss': 1.4736, 'grad_norm': 0.0050231359593170275, 'learning_rate': 0.16283510625655473, 'epoch': 0.31}
+ 31%|███ | 160/520 [10:07<22:23, 3.73s/it] {'loss': 1.5146, 'grad_norm': 0.005425853931256985, 'learning_rate': 0.16234898018587338, 'epoch': 0.31}
+ 31%|███ | 161/520 [10:11<22:14, 3.72s/it] {'loss': 1.5042, 'grad_norm': 0.006304944271651955, 'learning_rate': 0.16186043159937882, 'epoch': 0.31}
+ 31%|███ | 162/520 [10:14<22:05, 3.70s/it] {'loss': 1.6678, 'grad_norm': 0.006190556450344027, 'learning_rate': 0.16136947947920477, 'epoch': 0.31}
+ 31%|███▏ | 163/520 [10:18<21:58, 3.69s/it] {'loss': 1.3553, 'grad_norm': 0.005804432751008657, 'learning_rate': 0.16087614290087207, 'epoch': 0.31}
+ 32%|███▏ | 164/520 [10:22<21:57, 3.70s/it] {'loss': 1.3236, 'grad_norm': 0.004364985921978508, 'learning_rate': 0.16038044103254775, 'epoch': 0.32}
+ 32%|███▏ | 165/520 [10:25<21:49, 3.69s/it] {'loss': 1.4743, 'grad_norm': 0.004755443449541639, 'learning_rate': 0.15988239313430005, 'epoch': 0.32}
+ 32%|███▏ | 166/520 [10:29<21:42, 3.68s/it] {'loss': 1.484, 'grad_norm': 0.00553733666992128, 'learning_rate': 0.15938201855735015, 'epoch': 0.32}
+ 32%|███▏ | 167/520 [10:33<21:58, 3.73s/it] {'loss': 1.4626, 'grad_norm': 0.00491981801602783, 'learning_rate': 0.15887933674332047, 'epoch': 0.32}
+ 32%|███▏ | 168/520 [10:37<21:56, 3.74s/it] {'loss': 1.3924, 'grad_norm': 0.005512432919943203, 'learning_rate': 0.158374367223479, 'epoch': 0.32}
+ 32%|███▎ | 169/520 [10:41<22:07, 3.78s/it] {'loss': 1.4884, 'grad_norm': 0.004825511439836379, 'learning_rate': 0.1578671296179806, 'epoch': 0.33}
+ 33%|███▎ | 170/520 [10:44<22:11, 3.80s/it] {'loss': 1.5759, 'grad_norm': 0.005108824048060842, 'learning_rate': 0.15735764363510463, 'epoch': 0.33}
+ 33%|███▎ | 171/520 [10:48<22:13, 3.82s/it] {'loss': 1.4175, 'grad_norm': 0.005222994051652966, 'learning_rate': 0.15684592907048925, 'epoch': 0.33}
+ 33%|███▎ | 172/520 [10:52<22:02, 3.80s/it] {'loss': 1.4744, 'grad_norm': 0.004992515073264653, 'learning_rate': 0.1563320058063622, 'epoch': 0.33}
+ 33%|███▎ | 173/520 [10:56<21:46, 3.76s/it] {'loss': 1.3961, 'grad_norm': 0.004565856775088016, 'learning_rate': 0.15581589381076844, 'epoch': 0.33}
+ 33%|███▎ | 174/520 [10:59<21:32, 3.74s/it] {'loss': 1.4903, 'grad_norm': 0.004942634681810159, 'learning_rate': 0.15529761313679394, 'epoch': 0.33}
+ 34%|███▎ | 175/520 [11:03<21:24, 3.72s/it] {'loss': 1.3859, 'grad_norm': 0.005265403201419494, 'learning_rate': 0.15477718392178716, 'epoch': 0.34}
+ 34%|███▍ | 176/520 [11:07<21:16, 3.71s/it] {'loss': 1.6848, 'grad_norm': 0.005453450872494653, 'learning_rate': 0.15425462638657594, 'epoch': 0.34}
+ 34%|███▍ | 177/520 [11:11<21:17, 3.72s/it] {'loss': 1.5172, 'grad_norm': 0.004774861914990001, 'learning_rate': 0.1537299608346824, 'epoch': 0.34}
+ 34%|███▍ | 178/520 [11:14<21:20, 3.74s/it] {'loss': 1.462, 'grad_norm': 0.0055982413204056095, 'learning_rate': 0.15320320765153367, 'epoch': 0.34}
+ 34%|███▍ | 179/520 [11:18<21:23, 3.76s/it] {'loss': 1.5495, 'grad_norm': 0.004740321169086494, 'learning_rate': 0.15267438730367008, 'epoch': 0.34}
+ 35%|███▍ | 180/520 [11:22<21:22, 3.77s/it] {'loss': 1.4577, 'grad_norm': 0.005685096263359819, 'learning_rate': 0.1521435203379498, 'epoch': 0.35}
+ 35%|███▍ | 181/520 [11:26<21:21, 3.78s/it] {'loss': 1.4239, 'grad_norm': 0.004408721896981247, 'learning_rate': 0.15161062738075068, 'epoch': 0.35}
+ 35%|███▌ | 182/520 [11:30<21:22, 3.79s/it] {'loss': 1.4379, 'grad_norm': 0.005077723614157329, 'learning_rate': 0.1510757291371686, 'epoch': 0.35}
+ 35%|███▌ | 183/520 [11:33<21:17, 3.79s/it] {'loss': 1.4701, 'grad_norm': 0.005010642904273009, 'learning_rate': 0.1505388463902131, 'epoch': 0.35}
+ 35%|███▌ | 184/520 [11:37<21:16, 3.80s/it] {'loss': 1.3552, 'grad_norm': 0.004714965080799022, 'learning_rate': 0.15000000000000002, 'epoch': 0.35}
+ 36%|███▌ | 185/520 [11:41<21:15, 3.81s/it] {'loss': 1.574, 'grad_norm': 0.004654480287991895, 'learning_rate': 0.14945921090294076, 'epoch': 0.36}
+ 36%|███▌ | 186/520 [11:45<21:10, 3.80s/it] {'loss': 1.3853, 'grad_norm': 0.005081825145894096, 'learning_rate': 0.14891650011092894, 'epoch': 0.36}
+ 36%|███▌ | 187/520 [11:49<21:08, 3.81s/it] {'loss': 1.4155, 'grad_norm': 0.005390335472578065, 'learning_rate': 0.14837188871052398, 'epoch': 0.36}
+ 36%|███▌ | 188/520 [11:52<21:03, 3.81s/it] {'loss': 1.467, 'grad_norm': 0.006034976480971933, 'learning_rate': 0.14782539786213184, 'epoch': 0.36}
+ 36%|███▋ | 189/520 [11:56<21:01, 3.81s/it] {'loss': 1.5009, 'grad_norm': 0.004372225550952047, 'learning_rate': 0.1472770487991827, 'epoch': 0.36}
+ 37%|███▋ | 190/520 [12:00<20:57, 3.81s/it] {'loss': 1.3966, 'grad_norm': 0.005163627982543569, 'learning_rate': 0.1467268628273062, 'epoch': 0.37}
+ 37%|███▋ | 191/520 [12:04<20:54, 3.81s/it] {'loss': 1.3657, 'grad_norm': 0.005466553514960332, 'learning_rate': 0.1461748613235034, 'epoch': 0.37}
+ 37%|███▋ | 192/520 [12:08<21:02, 3.85s/it] {'loss': 1.4707, 'grad_norm': 0.004754977055064443, 'learning_rate': 0.1456210657353163, 'epoch': 0.37}
+ 37%|███▋ | 193/520 [12:12<21:17, 3.91s/it] {'loss': 1.6004, 'grad_norm': 0.006049534681188301, 'learning_rate': 0.14506549757999454, 'epoch': 0.37}
0.006049534681188301, 'learning_rate': 0.14506549757999454, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:12<21:17, 3.91s/it] 37%|███▋ | 194/520 [12:16<21:26, 3.95s/it] {'loss': 1.4394, 'grad_norm': 0.004666666058157109, 'learning_rate': 0.14450817844365924, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:16<21:26, 3.95s/it] 38%|███▊ | 195/520 [12:20<21:30, 3.97s/it] {'loss': 1.4714, 'grad_norm': 0.00471167094008602, 'learning_rate': 0.1439491299804645, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:20<21:30, 3.97s/it] 38%|███▊ | 196/520 [12:24<21:31, 3.99s/it] {'loss': 1.4263, 'grad_norm': 0.005069575099394916, 'learning_rate': 0.14338837391175582, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:24<21:31, 3.99s/it] 38%|███▊ | 197/520 [12:28<21:32, 4.00s/it] {'loss': 1.3968, 'grad_norm': 0.005761568664987541, 'learning_rate': 0.14282593202522628, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:28<21:32, 4.00s/it] 38%|███▊ | 198/520 [12:32<21:31, 4.01s/it] {'loss': 1.485, 'grad_norm': 0.005316434417438112, 'learning_rate': 0.14226182617406996, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:32<21:31, 4.01s/it] 38%|███▊ | 199/520 [12:36<21:26, 4.01s/it] {'loss': 1.3881, 'grad_norm': 0.005862142537352204, 'learning_rate': 0.14169607827613281, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:36<21:26, 4.01s/it] 38%|███▊ | 200/520 [12:40<21:04, 3.95s/it] {'loss': 1.5141, 'grad_norm': 0.005160387879049857, 'learning_rate': 0.14112871031306118, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:40<21:04, 3.95s/it] 39%|███▊ | 201/520 [12:44<20:49, 3.92s/it] {'loss': 1.5179, 'grad_norm': 0.004772845900403899, 'learning_rate': 0.1405597443294475, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:44<20:49, 3.92s/it] 39%|███▉ | 202/520 [12:48<20:40, 3.90s/it] {'loss': 1.3748, 'grad_norm': 0.004571002930888528, 'learning_rate': 0.13998920243197407, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:48<20:40, 3.90s/it] 39%|███▉ | 203/520 [12:51<20:30, 3.88s/it] {'loss': 1.4237, 'grad_norm': 0.004918843008808463, 'learning_rate': 0.13941710678855396, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:51<20:30, 3.88s/it] 39%|███▉ | 204/520 [12:55<20:20, 3.86s/it] {'loss': 1.469, 'grad_norm': 0.0057049481612737, 'learning_rate': 0.13884347962746948, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:55<20:20, 3.86s/it] 39%|███▉ | 205/520 [12:59<20:15, 3.86s/it] {'loss': 1.5285, 'grad_norm': 0.005766111027819585, 'learning_rate': 0.138268343236509, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:59<20:15, 3.86s/it] 40%|███▉ | 206/520 [13:03<20:09, 3.85s/it] {'loss': 1.5212, 'grad_norm': 0.005100893541599511, 'learning_rate': 0.13769171996210053, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:03<20:09, 3.85s/it] 40%|███▉ | 207/520 [13:07<20:05, 3.85s/it] {'loss': 1.5073, 'grad_norm': 0.004456286087803453, 'learning_rate': 0.1371136322084438, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:07<20:05, 3.85s/it] 40%|████ | 208/520 [13:11<19:58, 3.84s/it] {'loss': 1.4668, 'grad_norm': 0.005498302943066416, 'learning_rate': 0.13653410243663952, 'epoch': 0.4} + 40%|████ | 208/520 [13:11<19:58, 3.84s/it] 40%|████ | 209/520 [13:14<19:55, 3.84s/it] {'loss': 1.3975, 'grad_norm': 0.004801846138862364, 'learning_rate': 0.13595315316381676, 'epoch': 0.4} + 40%|████ | 209/520 [13:14<19:55, 3.84s/it] 40%|████ | 210/520 [13:18<19:33, 3.79s/it] {'loss': 1.4753, 'grad_norm': 0.005258839386660836, 'learning_rate': 0.13537080696225814, 'epoch': 0.4} + 40%|████ | 210/520 [13:18<19:33, 3.79s/it] 41%|████ | 211/520 [13:22<19:19, 3.75s/it] {'loss': 1.4933, 'grad_norm': 0.004427535639274846, 'learning_rate': 0.13478708645852272, 'epoch': 0.41} + 41%|████ | 211/520 
[13:22<19:19, 3.75s/it] 41%|████ | 212/520 [13:25<19:06, 3.72s/it] {'loss': 1.444, 'grad_norm': 0.004523425291754949, 'learning_rate': 0.1342020143325669, 'epoch': 0.41} + 41%|████ | 212/520 [13:25<19:06, 3.72s/it] 41%|████ | 213/520 [13:29<18:57, 3.71s/it] {'loss': 1.4151, 'grad_norm': 0.0055335864736087695, 'learning_rate': 0.13361561331686309, 'epoch': 0.41} + 41%|████ | 213/520 [13:29<18:57, 3.71s/it] 41%|████ | 214/520 [13:33<18:51, 3.70s/it] {'loss': 1.4127, 'grad_norm': 0.004926676730557736, 'learning_rate': 0.13302790619551672, 'epoch': 0.41} + 41%|████ | 214/520 [13:33<18:51, 3.70s/it] 41%|████▏ | 215/520 [13:36<18:45, 3.69s/it] {'loss': 1.4233, 'grad_norm': 0.00455463583825809, 'learning_rate': 0.1324389158033807, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:36<18:45, 3.69s/it] 42%|████▏ | 216/520 [13:40<18:38, 3.68s/it] {'loss': 1.3066, 'grad_norm': 0.004772798256945379, 'learning_rate': 0.13184866502516845, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:40<18:38, 3.68s/it] 42%|████▏ | 217/520 [13:44<18:34, 3.68s/it] {'loss': 1.4429, 'grad_norm': 0.004843742147855708, 'learning_rate': 0.13125717679456447, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:44<18:34, 3.68s/it] 42%|████▏ | 218/520 [13:47<18:28, 3.67s/it] {'loss': 1.4519, 'grad_norm': 0.004982037378951474, 'learning_rate': 0.13066447409333345, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:47<18:28, 3.67s/it] 42%|████▏ | 219/520 [13:51<18:22, 3.66s/it] {'loss': 1.3937, 'grad_norm': 0.004067678639403212, 'learning_rate': 0.1300705799504273, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:51<18:22, 3.66s/it] 42%|████▏ | 220/520 [13:55<18:16, 3.65s/it] {'loss': 1.5031, 'grad_norm': 0.005150333348652988, 'learning_rate': 0.12947551744109043, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:55<18:16, 3.65s/it] 42%|████▎ | 221/520 [13:58<18:13, 3.66s/it] {'loss': 1.4429, 'grad_norm': 0.004415633745856906, 'learning_rate': 0.128879309685963, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:58<18:13, 3.66s/it] 43%|████▎ | 222/520 [14:02<18:09, 3.66s/it] {'loss': 1.3402, 'grad_norm': 0.004839825219582367, 'learning_rate': 0.12828197985018275, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:02<18:09, 3.66s/it] 43%|████▎ | 223/520 [14:06<18:03, 3.65s/it] {'loss': 1.332, 'grad_norm': 0.004372971041105776, 'learning_rate': 0.12768355114248495, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:06<18:03, 3.65s/it] 43%|████▎ | 224/520 [14:09<18:01, 3.66s/it] {'loss': 1.7414, 'grad_norm': 0.006353021494323161, 'learning_rate': 0.12708404681430052, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:09<18:01, 3.66s/it] 43%|████▎ | 225/520 [14:13<17:56, 3.65s/it] {'loss': 1.3599, 'grad_norm': 0.00573711593719213, 'learning_rate': 0.1264834901588527, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:13<17:56, 3.65s/it] 43%|████▎ | 226/520 [14:17<17:53, 3.65s/it] {'loss': 1.462, 'grad_norm': 0.004538957349635121, 'learning_rate': 0.12588190451025208, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:17<17:53, 3.65s/it] 44%|████▎ | 227/520 [14:20<17:48, 3.65s/it] {'loss': 1.4453, 'grad_norm': 0.004537485185299772, 'learning_rate': 0.12527931324258976, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:20<17:48, 3.65s/it] 44%|████▍ | 228/520 [14:24<17:43, 3.64s/it] {'loss': 1.6521, 'grad_norm': 0.005726015966822297, 'learning_rate': 0.12467573976902935, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:24<17:43, 3.64s/it] 44%|████▍ | 229/520 [14:27<17:39, 3.64s/it] {'loss': 1.424, 'grad_norm': 0.004281586520316389, 'learning_rate': 0.12407120754089732, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:27<17:39, 3.64s/it] 44%|████▍ | 230/520 [14:31<17:35, 
3.64s/it] {'loss': 1.2946, 'grad_norm': 0.004894358569026533, 'learning_rate': 0.12346574004677154, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:31<17:35, 3.64s/it] 44%|████▍ | 231/520 [14:35<17:35, 3.65s/it] {'loss': 1.3437, 'grad_norm': 0.004222478736039977, 'learning_rate': 0.12285936081156897, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:35<17:35, 3.65s/it] 45%|████▍ | 232/520 [14:38<17:31, 3.65s/it] {'loss': 1.6945, 'grad_norm': 0.005830893656592422, 'learning_rate': 0.12225209339563144, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:38<17:31, 3.65s/it] 45%|████▍ | 233/520 [14:42<17:26, 3.65s/it] {'loss': 1.5428, 'grad_norm': 0.005199887962046031, 'learning_rate': 0.12164396139381028, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:42<17:26, 3.65s/it] 45%|████▌ | 234/520 [14:46<17:22, 3.65s/it] {'loss': 1.302, 'grad_norm': 0.004649971873136032, 'learning_rate': 0.12103498843454959, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:46<17:22, 3.65s/it] 45%|████▌ | 235/520 [14:49<17:20, 3.65s/it] {'loss': 1.3595, 'grad_norm': 0.005166746964761061, 'learning_rate': 0.12042519817896805, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:49<17:20, 3.65s/it] 45%|████▌ | 236/520 [14:53<17:15, 3.64s/it] {'loss': 1.4966, 'grad_norm': 0.0043313600820673055, 'learning_rate': 0.11981461431993977, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:53<17:15, 3.64s/it] 46%|████▌ | 237/520 [14:57<17:10, 3.64s/it] {'loss': 1.4465, 'grad_norm': 0.005032076234637611, 'learning_rate': 0.11920326058117364, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:57<17:10, 3.64s/it] 46%|████▌ | 238/520 [15:00<17:07, 3.64s/it] {'loss': 1.3776, 'grad_norm': 0.004799806957910661, 'learning_rate': 0.11859116071629149, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:00<17:07, 3.64s/it] 46%|████▌ | 239/520 [15:04<17:03, 3.64s/it] {'loss': 1.4963, 'grad_norm': 0.0050807014411672876, 'learning_rate': 0.11797833850790528, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:04<17:03, 3.64s/it] 46%|████▌ | 240/520 [15:08<17:00, 3.64s/it] {'loss': 1.2348, 'grad_norm': 0.005018411044151711, 'learning_rate': 0.11736481776669305, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:08<17:00, 3.64s/it] 46%|████▋ | 241/520 [15:11<16:58, 3.65s/it] {'loss': 1.3241, 'grad_norm': 0.004386500215946088, 'learning_rate': 0.11675062233047365, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:11<16:58, 3.65s/it] 47%|████▋ | 242/520 [15:15<16:54, 3.65s/it] {'loss': 1.3581, 'grad_norm': 0.0044593030748154815, 'learning_rate': 0.11613577606328068, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:15<16:54, 3.65s/it] 47%|████▋ | 243/520 [15:19<16:48, 3.64s/it] {'loss': 1.3393, 'grad_norm': 0.0050003407924561595, 'learning_rate': 0.11552030285443515, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:19<16:48, 3.64s/it] 47%|████▋ | 244/520 [15:22<16:42, 3.63s/it] {'loss': 1.4957, 'grad_norm': 0.004477785891833427, 'learning_rate': 0.11490422661761744, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:22<16:42, 3.63s/it] 47%|████▋ | 245/520 [15:26<16:38, 3.63s/it] {'loss': 1.3271, 'grad_norm': 0.004718624841112673, 'learning_rate': 0.11428757128993801, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:26<16:38, 3.63s/it] 47%|████▋ | 246/520 [15:29<16:38, 3.64s/it] {'loss': 1.6471, 'grad_norm': 0.005322282473107534, 'learning_rate': 0.11367036083100734, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:29<16:38, 3.64s/it] 48%|████▊ | 247/520 [15:33<16:32, 3.63s/it] {'loss': 1.5139, 'grad_norm': 0.004696520964068454, 'learning_rate': 0.11305261922200519, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:33<16:32, 3.63s/it] 48%|████▊ | 248/520 [15:37<16:27, 3.63s/it] {'loss': 1.3295, 'grad_norm': 
0.005022191987444716, 'learning_rate': 0.11243437046474854, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:37<16:27, 3.63s/it] 48%|████▊ | 249/520 [15:40<16:25, 3.64s/it] {'loss': 1.4504, 'grad_norm': 0.005153414033355197, 'learning_rate': 0.1118156385807593, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:40<16:25, 3.64s/it] 48%|████▊ | 250/520 [15:44<16:24, 3.65s/it] {'loss': 1.3805, 'grad_norm': 0.004639999413607452, 'learning_rate': 0.11119644761033079, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:44<16:24, 3.65s/it] 48%|████▊ | 251/520 [15:48<16:18, 3.64s/it] {'loss': 1.4508, 'grad_norm': 0.00476188257472027, 'learning_rate': 0.1105768216115938, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:48<16:18, 3.64s/it] 48%|████▊ | 252/520 [15:51<16:17, 3.65s/it] {'loss': 1.5107, 'grad_norm': 0.004600023533612869, 'learning_rate': 0.10995678465958168, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:51<16:17, 3.65s/it] 49%|████▊ | 253/520 [15:55<16:16, 3.66s/it] {'loss': 1.4476, 'grad_norm': 0.005041821367019237, 'learning_rate': 0.10933636084529506, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:55<16:16, 3.66s/it] 49%|████▉ | 254/520 [15:59<16:14, 3.66s/it] {'loss': 1.3492, 'grad_norm': 0.004129021949748144, 'learning_rate': 0.10871557427476584, 'epoch': 0.49} + 49%|████▉ | 254/520 [15:59<16:14, 3.66s/it] 49%|████▉ | 255/520 [16:02<16:12, 3.67s/it] {'loss': 1.3655, 'grad_norm': 0.004656118802765727, 'learning_rate': 0.10809444906812034, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:02<16:12, 3.67s/it] 49%|████▉ | 256/520 [16:06<16:07, 3.66s/it] {'loss': 1.4082, 'grad_norm': 0.005250529083126252, 'learning_rate': 0.10747300935864244, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:06<16:07, 3.66s/it] 49%|████▉ | 257/520 [16:10<16:05, 3.67s/it] {'loss': 1.4169, 'grad_norm': 0.00446181699073152, 'learning_rate': 0.10685127929183567, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:10<16:05, 3.67s/it] 50%|████▉ | 258/520 [16:13<16:01, 3.67s/it] {'loss': 1.4261, 'grad_norm': 0.0037547745287811536, 'learning_rate': 0.10622928302448523, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:13<16:01, 3.67s/it] 50%|████▉ | 259/520 [16:17<15:55, 3.66s/it] {'loss': 1.4888, 'grad_norm': 0.00511954671318011, 'learning_rate': 0.10560704472371918, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:17<15:55, 3.66s/it] 50%|█████ | 260/520 [16:21<15:52, 3.66s/it] {'loss': 1.5997, 'grad_norm': 0.004624904436191884, 'learning_rate': 0.10498458856606972, 'epoch': 0.5} + 50%|█████ | 260/520 [16:21<15:52, 3.66s/it] 50%|█████ | 261/520 [16:24<15:49, 3.67s/it] {'loss': 1.5193, 'grad_norm': 0.004677117627227274, 'learning_rate': 0.10436193873653361, 'epoch': 0.5} + 50%|█████ | 261/520 [16:24<15:49, 3.67s/it] 50%|█████ | 262/520 [16:28<15:46, 3.67s/it] {'loss': 1.3216, 'grad_norm': 0.00473133939979434, 'learning_rate': 0.10373911942763259, 'epoch': 0.5} + 50%|█████ | 262/520 [16:28<15:46, 3.67s/it] 51%|█████ | 263/520 [16:32<15:40, 3.66s/it] {'loss': 1.5191, 'grad_norm': 0.004734087153993459, 'learning_rate': 0.10311615483847332, 'epoch': 0.51} + 51%|█████ | 263/520 [16:32<15:40, 3.66s/it] 51%|█████ | 264/520 [16:35<15:37, 3.66s/it] {'loss': 1.4548, 'grad_norm': 0.00427394006902276, 'learning_rate': 0.1024930691738073, 'epoch': 0.51} + 51%|█████ | 264/520 [16:35<15:37, 3.66s/it] 51%|█████ | 265/520 [16:39<15:35, 3.67s/it] {'loss': 1.3291, 'grad_norm': 0.005482594820637019, 'learning_rate': 0.10186988664309023, 'epoch': 0.51} + 51%|█████ | 265/520 [16:39<15:35, 3.67s/it] 51%|█████ | 266/520 [16:43<15:30, 3.66s/it] {'loss': 1.1727, 'grad_norm': 0.0040277064632352096, 'learning_rate': 
0.10124663145954152, 'epoch': 0.51} + 51%|█████ | 266/520 [16:43<15:30, 3.66s/it] 51%|█████▏ | 267/520 [16:46<15:29, 3.67s/it] {'loss': 1.3323, 'grad_norm': 0.0044045671540297, 'learning_rate': 0.10062332783920336, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:46<15:29, 3.67s/it] 52%|█████▏ | 268/520 [16:50<15:27, 3.68s/it] {'loss': 1.666, 'grad_norm': 0.005645225773120796, 'learning_rate': 0.1, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:50<15:27, 3.68s/it] 52%|█████▏ | 269/520 [16:54<15:20, 3.67s/it] {'loss': 1.4504, 'grad_norm': 0.00461760562506341, 'learning_rate': 0.09937667216079665, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:54<15:20, 3.67s/it] 52%|█████▏ | 270/520 [16:57<15:18, 3.68s/it] {'loss': 1.4299, 'grad_norm': 0.004736501227881029, 'learning_rate': 0.0987533685404585, 'epoch': 0.52} + 52%|█████▏ | 270/520 [16:57<15:18, 3.68s/it] 52%|█████▏ | 271/520 [17:01<15:16, 3.68s/it] {'loss': 1.4542, 'grad_norm': 0.0045916515136412645, 'learning_rate': 0.0981301133569098, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:01<15:16, 3.68s/it] 52%|█████▏ | 272/520 [17:05<15:16, 3.70s/it] {'loss': 1.4464, 'grad_norm': 0.005322061723744254, 'learning_rate': 0.09750693082619273, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:05<15:16, 3.70s/it] 52%|█████▎ | 273/520 [17:08<15:10, 3.69s/it] {'loss': 1.5989, 'grad_norm': 0.0056509600500690505, 'learning_rate': 0.0968838451615267, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:08<15:10, 3.69s/it] 53%|█████▎ | 274/520 [17:12<15:02, 3.67s/it] {'loss': 1.3814, 'grad_norm': 0.004593332858066125, 'learning_rate': 0.09626088057236745, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:12<15:02, 3.67s/it] 53%|█████▎ | 275/520 [17:16<14:56, 3.66s/it] {'loss': 1.3272, 'grad_norm': 0.005564363870913483, 'learning_rate': 0.09563806126346641, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:16<14:56, 3.66s/it] 53%|█████▎ | 276/520 [17:19<14:52, 3.66s/it] {'loss': 1.4252, 'grad_norm': 0.0046074123362072, 'learning_rate': 0.09501541143393027, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:19<14:52, 3.66s/it] 53%|█████▎ | 277/520 [17:23<14:45, 3.65s/it] {'loss': 1.5738, 'grad_norm': 0.004524135578826675, 'learning_rate': 0.09439295527628082, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:23<14:45, 3.65s/it] 53%|█████▎ | 278/520 [17:27<14:40, 3.64s/it] {'loss': 1.2742, 'grad_norm': 0.004219498606375926, 'learning_rate': 0.0937707169755148, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:27<14:40, 3.64s/it] 54%|█████▎ | 279/520 [17:30<14:37, 3.64s/it] {'loss': 1.4652, 'grad_norm': 0.0053966895881482, 'learning_rate': 0.09314872070816434, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:30<14:37, 3.64s/it] 54%|█████▍ | 280/520 [17:34<14:33, 3.64s/it] {'loss': 1.3376, 'grad_norm': 0.004671076280709849, 'learning_rate': 0.09252699064135758, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:34<14:33, 3.64s/it] 54%|█████▍ | 281/520 [17:37<14:29, 3.64s/it] {'loss': 1.445, 'grad_norm': 0.004690380374280058, 'learning_rate': 0.09190555093187967, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:37<14:29, 3.64s/it] 54%|█████▍ | 282/520 [17:41<14:25, 3.64s/it] {'loss': 1.2862, 'grad_norm': 0.004113129586227074, 'learning_rate': 0.09128442572523418, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:41<14:25, 3.64s/it] 54%|█████▍ | 283/520 [17:45<14:22, 3.64s/it] {'loss': 1.4907, 'grad_norm': 0.0049935138611202, 'learning_rate': 0.09066363915470495, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:45<14:22, 3.64s/it] 55%|█████▍ | 284/520 [17:48<14:19, 3.64s/it] {'loss': 1.4325, 'grad_norm': 0.005201143090554084, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 
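The logged learning_rate values follow cosine decay with linear warmup (learning_rate 2e-1, warmup_ratio 0.03, 520 steps, per the training arguments); step 268 above lands exactly halfway through the decay at 0.1. A minimal sketch, not the training code itself, that reproduces them assuming Hugging Face Trainer's usual warmup_steps = ceil(warmup_ratio * max_steps) rounding:

```python
import math

# Minimal sketch (assumed schedule, matching HF's cosine-with-warmup formula).
base_lr, max_steps = 2e-1, 520
warmup_steps = math.ceil(0.03 * max_steps)  # 16

def lr_at(step: int) -> float:
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    progress = (step - warmup_steps) / (max_steps - warmup_steps)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(268))  # 0.1, as logged at step 268/520 (exactly half-decayed)
print(lr_at(352))  # 0.050000000000000024, exactly the value logged at 352/520
print(lr_at(520))  # 0.0 at the final step
```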
55%|█████▍ | 284/520 [17:48<14:19, 3.64s/it] 55%|█████▍ | 285/520 [17:52<14:15, 3.64s/it] {'loss': 1.3169, 'grad_norm': 0.0045089204140119354, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:52<14:15, 3.64s/it] 55%|█████▌ | 286/520 [17:56<14:20, 3.68s/it] {'loss': 1.1739, 'grad_norm': 0.004299281846345162, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:56<14:20, 3.68s/it] 55%|█████▌ | 287/520 [18:00<14:27, 3.72s/it] {'loss': 1.442, 'grad_norm': 0.005177780785190248, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:00<14:27, 3.72s/it] 55%|█████▌ | 288/520 [18:03<14:30, 3.75s/it] {'loss': 1.5042, 'grad_norm': 0.0044863659833856385, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:03<14:30, 3.75s/it] 56%|█████▌ | 289/520 [18:07<14:32, 3.78s/it] {'loss': 1.3336, 'grad_norm': 0.0041360044813739194, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:07<14:32, 3.78s/it] 56%|█████▌ | 290/520 [18:11<14:31, 3.79s/it] {'loss': 1.2437, 'grad_norm': 0.003935596073921671, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:11<14:31, 3.79s/it] 56%|█████▌ | 291/520 [18:15<14:29, 3.80s/it] {'loss': 1.3093, 'grad_norm': 0.005139572585127088, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:15<14:29, 3.80s/it] 56%|█████▌ | 292/520 [18:19<14:27, 3.80s/it] {'loss': 1.3637, 'grad_norm': 0.004144626334997892, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:19<14:27, 3.80s/it] 56%|█████▋ | 293/520 [18:23<14:24, 3.81s/it] {'loss': 1.3, 'grad_norm': 0.004594615832980558, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:23<14:24, 3.81s/it] 57%|█████▋ | 294/520 [18:26<14:21, 3.81s/it] {'loss': 1.3339, 'grad_norm': 0.005014921585540305, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:26<14:21, 3.81s/it] 57%|█████▋ | 295/520 [18:30<14:06, 3.76s/it] {'loss': 1.5179, 'grad_norm': 0.0058731342342533185, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:30<14:06, 3.76s/it] 57%|█████▋ | 296/520 [18:34<13:54, 3.73s/it] {'loss': 1.2707, 'grad_norm': 0.004414473924465298, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:34<13:54, 3.73s/it] 57%|█████▋ | 297/520 [18:37<13:45, 3.70s/it] {'loss': 1.4109, 'grad_norm': 0.004841918461207294, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:37<13:45, 3.70s/it] 57%|█████▋ | 298/520 [18:41<13:35, 3.67s/it] {'loss': 1.3726, 'grad_norm': 0.004277179745601957, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:41<13:35, 3.67s/it] 57%|█████▊ | 299/520 [18:45<13:30, 3.67s/it] {'loss': 1.5173, 'grad_norm': 0.004720848553807492, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:45<13:30, 3.67s/it] 58%|█████▊ | 300/520 [18:48<13:25, 3.66s/it] {'loss': 1.4433, 'grad_norm': 0.004370277929476548, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:48<13:25, 3.66s/it] 58%|█████▊ | 301/520 [18:52<13:19, 3.65s/it] {'loss': 1.3964, 'grad_norm': 0.004553381263857183, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:52<13:19, 3.65s/it] 58%|█████▊ | 302/520 [18:56<13:17, 3.66s/it] {'loss': 1.5262, 'grad_norm': 0.0048123246867815515, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 
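Each optimizer step above is also logged as a Python-literal metrics dict embedded in the tqdm stream, so the loss and learning-rate curves can be recovered from the raw log after the fact. A hypothetical scraper, not part of the tinyllava code, with a placeholder file path:

```python
import ast
import re

# Hypothetical post-processing helper: the per-step dicts
# ({'loss': ..., 'grad_norm': ..., 'learning_rate': ..., 'epoch': ...})
# are valid Python literals, so a regex plus ast.literal_eval turns the
# raw log text into a list of records.
STEP_DICT = re.compile(r"\{'loss':.*?\}")

def parse_metrics(log_text: str) -> list[dict]:
    return [ast.literal_eval(m.group(0)) for m in STEP_DICT.finditer(log_text)]

log_text = open("train.log").read()  # placeholder path, not a file in this repo
losses = [m["loss"] for m in parse_metrics(log_text)]
```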
58%|█████▊ | 302/520 [18:56<13:17, 3.66s/it] 58%|█████▊ | 303/520 [18:59<13:13, 3.65s/it] {'loss': 1.3231, 'grad_norm': 0.004597925655121408, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [18:59<13:13, 3.65s/it] 58%|█████▊ | 304/520 [19:03<13:10, 3.66s/it] {'loss': 1.4146, 'grad_norm': 0.004440195962845895, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:03<13:10, 3.66s/it] 59%|█████▊ | 305/520 [19:07<13:06, 3.66s/it] {'loss': 1.4668, 'grad_norm': 0.005012163240322709, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:07<13:06, 3.66s/it] 59%|█████▉ | 306/520 [19:10<13:02, 3.66s/it] {'loss': 1.381, 'grad_norm': 0.00437334263385695, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:10<13:02, 3.66s/it] 59%|█████▉ | 307/520 [19:14<12:57, 3.65s/it] {'loss': 1.3255, 'grad_norm': 0.004091096453674815, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:14<12:57, 3.65s/it] 59%|█████▉ | 308/520 [19:17<12:54, 3.65s/it] {'loss': 1.4496, 'grad_norm': 0.004284139569377348, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:17<12:54, 3.65s/it] 59%|█████▉ | 309/520 [19:21<13:10, 3.75s/it] {'loss': 1.309, 'grad_norm': 0.0040822714359896204, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:21<13:10, 3.75s/it] 60%|█████▉ | 310/520 [19:25<13:00, 3.72s/it] {'loss': 1.2895, 'grad_norm': 0.004218345028050034, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:25<13:00, 3.72s/it] 60%|█████▉ | 311/520 [19:29<12:51, 3.69s/it] {'loss': 1.2591, 'grad_norm': 0.0040100130049302695, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:29<12:51, 3.69s/it] 60%|██████ | 312/520 [19:32<12:44, 3.68s/it] {'loss': 1.2408, 'grad_norm': 0.004454825262671506, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:32<12:44, 3.68s/it] 60%|██████ | 313/520 [19:36<12:37, 3.66s/it] {'loss': 1.2317, 'grad_norm': 0.003943343636423281, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:36<12:37, 3.66s/it] 60%|██████ | 314/520 [19:40<13:04, 3.81s/it] {'loss': 1.2714, 'grad_norm': 0.004291608375223792, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:40<13:04, 3.81s/it] 61%|██████ | 315/520 [19:44<12:57, 3.79s/it] {'loss': 1.5342, 'grad_norm': 0.008293679555901922, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:44<12:57, 3.79s/it] 61%|██████ | 316/520 [19:48<13:20, 3.92s/it] {'loss': 1.2346, 'grad_norm': 0.0051111842318857965, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:48<13:20, 3.92s/it] 61%|██████ | 317/520 [19:52<13:06, 3.87s/it] {'loss': 1.2691, 'grad_norm': 0.004427342241371354, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [19:52<13:06, 3.87s/it] 61%|██████ | 318/520 [19:56<12:55, 3.84s/it] {'loss': 1.4062, 'grad_norm': 0.005109070309889114, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [19:56<12:55, 3.84s/it] 61%|██████▏ | 319/520 [20:00<13:08, 3.92s/it] {'loss': 1.2575, 'grad_norm': 0.00493568175595488, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:00<13:08, 3.92s/it] 62%|██████▏ | 320/520 [20:04<12:55, 3.88s/it] {'loss': 1.1939, 'grad_norm': 0.004314941451783133, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 
320/520 [20:04<12:55, 3.88s/it] 62%|██████▏ | 321/520 [20:07<12:49, 3.87s/it] {'loss': 1.4096, 'grad_norm': 0.004315413508639262, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:07<12:49, 3.87s/it] 62%|██████▏ | 322/520 [20:11<12:40, 3.84s/it] {'loss': 1.3343, 'grad_norm': 0.005648373709929007, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:11<12:40, 3.84s/it] 62%|██████▏ | 323/520 [20:15<12:32, 3.82s/it] {'loss': 1.424, 'grad_norm': 0.005250283732601686, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:15<12:32, 3.82s/it] 62%|██████▏ | 324/520 [20:19<12:25, 3.80s/it] {'loss': 1.337, 'grad_norm': 0.00479244235958741, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:19<12:25, 3.80s/it] 62%|██████▎ | 325/520 [20:22<12:21, 3.80s/it] {'loss': 1.3598, 'grad_norm': 0.005208086306424303, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:22<12:21, 3.80s/it] 63%|██████▎ | 326/520 [20:26<12:16, 3.80s/it] {'loss': 1.325, 'grad_norm': 0.005003292411050252, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:26<12:16, 3.80s/it] 63%|██████▎ | 327/520 [20:30<12:05, 3.76s/it] {'loss': 1.5107, 'grad_norm': 0.006373595408241427, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:30<12:05, 3.76s/it] 63%|██████▎ | 328/520 [20:34<11:55, 3.73s/it] {'loss': 1.417, 'grad_norm': 0.00435592625026455, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:34<11:55, 3.73s/it] 63%|██████▎ | 329/520 [20:37<11:49, 3.71s/it] {'loss': 1.2474, 'grad_norm': 0.004272315807303159, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:37<11:49, 3.71s/it] 63%|██████▎ | 330/520 [20:41<11:45, 3.71s/it] {'loss': 1.3303, 'grad_norm': 0.005575224146158294, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:41<11:45, 3.71s/it] 64%|██████▎ | 331/520 [20:45<11:39, 3.70s/it] {'loss': 1.2958, 'grad_norm': 0.004363930879664815, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:45<11:39, 3.70s/it] 64%|██████▍ | 332/520 [20:48<11:34, 3.70s/it] {'loss': 1.4989, 'grad_norm': 0.004564007482895448, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:48<11:34, 3.70s/it] 64%|██████▍ | 333/520 [20:52<11:27, 3.68s/it] {'loss': 1.4659, 'grad_norm': 0.004478962587989626, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:52<11:27, 3.68s/it] 64%|██████▍ | 334/520 [20:56<11:23, 3.68s/it] {'loss': 1.3403, 'grad_norm': 0.004600948755972442, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:56<11:23, 3.68s/it] 64%|██████▍ | 335/520 [20:59<11:18, 3.67s/it] {'loss': 1.3348, 'grad_norm': 0.0044848860219806, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [20:59<11:18, 3.67s/it] 65%|██████▍ | 336/520 [21:03<11:13, 3.66s/it] {'loss': 1.2074, 'grad_norm': 0.004901573723511923, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:03<11:13, 3.66s/it] 65%|██████▍ | 337/520 [21:07<11:09, 3.66s/it] {'loss': 1.2105, 'grad_norm': 0.00419932560773691, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:07<11:09, 3.66s/it] 65%|██████▌ | 338/520 [21:10<11:06, 3.66s/it] {'loss': 1.3542, 'grad_norm': 0.005251953501705619, 'learning_rate': 0.05773817382593008, 'epoch': 
0.65} + 65%|██████▌ | 338/520 [21:10<11:06, 3.66s/it] 65%|██████▌ | 339/520 [21:14<11:02, 3.66s/it] {'loss': 1.2889, 'grad_norm': 0.004856817778693446, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:14<11:02, 3.66s/it] 65%|██████▌ | 340/520 [21:18<11:01, 3.67s/it] {'loss': 1.2675, 'grad_norm': 0.004239691476441833, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:18<11:01, 3.67s/it] 66%|██████▌ | 341/520 [21:21<10:56, 3.66s/it] {'loss': 1.2958, 'grad_norm': 0.004568901414083303, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:21<10:56, 3.66s/it] 66%|██████▌ | 342/520 [21:25<10:52, 3.66s/it] {'loss': 1.4729, 'grad_norm': 0.005143897887743516, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:25<10:52, 3.66s/it] 66%|██████▌ | 343/520 [21:29<10:51, 3.68s/it] {'loss': 1.4545, 'grad_norm': 0.005192115682082849, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:29<10:51, 3.68s/it] 66%|██████▌ | 344/520 [21:32<10:46, 3.67s/it] {'loss': 1.2326, 'grad_norm': 0.00412562246474934, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:32<10:46, 3.67s/it] 66%|██████▋ | 345/520 [21:36<10:45, 3.69s/it] {'loss': 1.3727, 'grad_norm': 0.004711230421476559, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:36<10:45, 3.69s/it] 67%|██████▋ | 346/520 [21:40<10:39, 3.68s/it] {'loss': 1.4118, 'grad_norm': 0.004331713518251266, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:40<10:39, 3.68s/it] 67%|██████▋ | 347/520 [21:43<10:33, 3.66s/it] {'loss': 1.2575, 'grad_norm': 0.004083961583905554, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:43<10:33, 3.66s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). 
Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:47<10:28, 3.66s/it] {'loss': 1.2285, 'grad_norm': 0.005065207166782071, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:47<10:28, 3.66s/it] 67%|██████▋ | 349/520 [21:51<10:24, 3.65s/it] {'loss': 1.2668, 'grad_norm': 0.00434470440477044, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:51<10:24, 3.65s/it] 67%|██████▋ | 350/520 [21:54<10:20, 3.65s/it] {'loss': 1.2956, 'grad_norm': 0.004640981564260983, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [21:54<10:20, 3.65s/it] 68%|██████▊ | 351/520 [21:58<10:18, 3.66s/it] {'loss': 1.2003, 'grad_norm': 0.003957277737191969, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [21:58<10:18, 3.66s/it] 68%|██████▊ | 352/520 [22:02<10:13, 3.65s/it] {'loss': 1.3278, 'grad_norm': 0.004338167300284735, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:02<10:13, 3.65s/it] 68%|██████▊ | 353/520 [22:05<10:11, 3.66s/it] {'loss': 1.3492, 'grad_norm': 0.003903583605781116, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:05<10:11, 3.66s/it] 68%|██████▊ | 354/520 [22:09<10:09, 3.67s/it] {'loss': 1.511, 'grad_norm': 0.004339808556920737, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:09<10:09, 3.67s/it] 68%|██████▊ | 355/520 [22:13<10:03, 3.66s/it] {'loss': 1.2706, 'grad_norm': 0.00412403550742298, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:13<10:03, 3.66s/it] 68%|██████▊ | 356/520 [22:16<09:59, 3.65s/it] {'loss': 1.266, 'grad_norm': 0.004184236694788831, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:16<09:59, 3.65s/it] 69%|██████▊ | 357/520 [22:20<09:54, 3.65s/it] {'loss': 1.2775, 'grad_norm': 0.0038639158510703495, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:20<09:54, 3.65s/it] 69%|██████▉ | 358/520 [22:23<09:52, 3.65s/it] {'loss': 1.2179, 'grad_norm': 0.004344322810448636, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:23<09:52, 3.65s/it] 69%|██████▉ | 359/520 [22:27<09:49, 3.66s/it] {'loss': 1.4191, 'grad_norm': 0.004674290870367978, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:27<09:49, 3.66s/it] 69%|██████▉ | 360/520 [22:31<09:46, 3.66s/it] {'loss': 1.4444, 'grad_norm': 0.005061268286938981, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:31<09:46, 3.66s/it] 69%|██████▉ | 361/520 [22:34<09:41, 3.66s/it] {'loss': 1.4282, 'grad_norm': 0.003971482342684129, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:34<09:41, 3.66s/it] 70%|██████▉ | 362/520 [22:38<09:37, 3.66s/it] {'loss': 1.2802, 'grad_norm': 0.004487098286674016, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:38<09:37, 3.66s/it] 70%|██████▉ | 363/520 [22:42<09:33, 3.65s/it] {'loss': 1.3173, 'grad_norm': 0.00414718996276737, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:42<09:33, 3.65s/it] 70%|███████ | 364/520 [22:45<09:30, 3.66s/it] {'loss': 1.4508, 'grad_norm': 0.004415657985874211, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [22:45<09:30, 3.66s/it] 70%|███████ | 365/520 [22:49<09:27, 3.66s/it] {'loss': 1.3842, 'grad_norm': 0.004485504733927147, 
'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [22:49<09:27, 3.66s/it] 70%|███████ | 366/520 [22:53<09:24, 3.67s/it] {'loss': 1.3256, 'grad_norm': 0.003975399513391913, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [22:53<09:24, 3.67s/it] 71%|███████ | 367/520 [22:56<09:21, 3.67s/it] {'loss': 1.3288, 'grad_norm': 0.004437629094186106, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [22:56<09:21, 3.67s/it] 71%|███████ | 368/520 [23:00<09:17, 3.67s/it] {'loss': 1.1792, 'grad_norm': 0.004591209222557177, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:00<09:17, 3.67s/it] 71%|███████ | 369/520 [23:04<09:13, 3.67s/it] {'loss': 1.3968, 'grad_norm': 0.004425263550680128, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:04<09:13, 3.67s/it] 71%|███████ | 370/520 [23:07<09:10, 3.67s/it] {'loss': 1.2361, 'grad_norm': 0.0038513669630555972, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:07<09:10, 3.67s/it] 71%|███████▏ | 371/520 [23:11<09:04, 3.65s/it] {'loss': 1.2391, 'grad_norm': 0.004517913897702141, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:11<09:04, 3.65s/it] 72%|███████▏ | 372/520 [23:15<09:00, 3.65s/it] {'loss': 1.5074, 'grad_norm': 0.00431019178451368, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:15<09:00, 3.65s/it] 72%|███████▏ | 373/520 [23:18<08:56, 3.65s/it] {'loss': 1.3724, 'grad_norm': 0.004628299680716336, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:18<08:56, 3.65s/it] 72%|███████▏ | 374/520 [23:22<08:50, 3.64s/it] {'loss': 1.3063, 'grad_norm': 0.004222093354277948, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:22<08:50, 3.64s/it] 72%|███████▏ | 375/520 [23:26<08:46, 3.63s/it] {'loss': 1.2182, 'grad_norm': 0.004278758583222315, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:26<08:46, 3.63s/it] 72%|███████▏ | 376/520 [23:29<08:45, 3.65s/it] {'loss': 1.3478, 'grad_norm': 0.003936672297974159, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:29<08:45, 3.65s/it] 72%|███████▎ | 377/520 [23:33<08:42, 3.66s/it] {'loss': 1.2895, 'grad_norm': 0.004546554721462452, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:33<08:42, 3.66s/it] 73%|███████▎ | 378/520 [23:37<08:39, 3.66s/it] {'loss': 1.3349, 'grad_norm': 0.004425694452871889, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:37<08:39, 3.66s/it] 73%|███████▎ | 379/520 [23:40<08:34, 3.65s/it] {'loss': 1.3266, 'grad_norm': 0.00401638335732128, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:40<08:34, 3.65s/it] 73%|███████▎ | 380/520 [23:44<08:31, 3.66s/it] {'loss': 1.4787, 'grad_norm': 0.005131209007543904, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:44<08:31, 3.66s/it] 73%|███████▎ | 381/520 [23:48<08:29, 3.67s/it] {'loss': 1.315, 'grad_norm': 0.0041074548538187535, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:48<08:29, 3.67s/it] 73%|███████▎ | 382/520 [23:51<08:27, 3.68s/it] {'loss': 1.416, 'grad_norm': 0.004540254512322685, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:51<08:27, 3.68s/it] 74%|███████▎ | 383/520 [23:55<08:23, 
3.68s/it] {'loss': 1.1519, 'grad_norm': 0.004379651789714737, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [23:55<08:23, 3.68s/it] 74%|███████▍ | 384/520 [23:59<08:20, 3.68s/it] {'loss': 1.5578, 'grad_norm': 0.005046150129691609, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [23:59<08:20, 3.68s/it] 74%|███████▍ | 385/520 [24:02<08:15, 3.67s/it] {'loss': 1.2903, 'grad_norm': 0.004095747260800211, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:02<08:15, 3.67s/it] 74%|███████▍ | 386/520 [24:06<08:10, 3.66s/it] {'loss': 1.2318, 'grad_norm': 0.0038432602025846104, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:06<08:10, 3.66s/it] 74%|███████▍ | 387/520 [24:10<08:07, 3.67s/it] {'loss': 1.4977, 'grad_norm': 0.004149454406764734, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:10<08:07, 3.67s/it] 75%|███████▍ | 388/520 [24:13<08:04, 3.67s/it] {'loss': 1.1778, 'grad_norm': 0.003709473028421474, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:13<08:04, 3.67s/it] 75%|███████▍ | 389/520 [24:17<08:00, 3.67s/it] {'loss': 1.257, 'grad_norm': 0.004552420414334479, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:17<08:00, 3.67s/it] 75%|███████▌ | 390/520 [24:21<07:56, 3.66s/it] {'loss': 1.3061, 'grad_norm': 0.004071482964087442, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:21<07:56, 3.66s/it] 75%|███████▌ | 391/520 [24:24<07:53, 3.67s/it] {'loss': 1.4061, 'grad_norm': 0.004111091886809183, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:24<07:53, 3.67s/it] 75%|███████▌ | 392/520 [24:28<07:49, 3.67s/it] {'loss': 1.196, 'grad_norm': 0.004161246856506152, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:28<07:49, 3.67s/it] 76%|███████▌ | 393/520 [24:32<07:45, 3.67s/it] {'loss': 1.2792, 'grad_norm': 0.0039740916732359955, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:32<07:45, 3.67s/it] 76%|███████▌ | 394/520 [24:35<07:40, 3.66s/it] {'loss': 1.2666, 'grad_norm': 0.004299892872259774, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:35<07:40, 3.66s/it] 76%|███████▌ | 395/520 [24:39<07:38, 3.67s/it] {'loss': 1.2224, 'grad_norm': 0.0043540719366517815, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:39<07:38, 3.67s/it] 76%|███████▌ | 396/520 [24:43<07:34, 3.66s/it] {'loss': 1.3166, 'grad_norm': 0.004365126086592159, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:43<07:34, 3.66s/it] 76%|███████▋ | 397/520 [24:46<07:30, 3.66s/it] {'loss': 1.296, 'grad_norm': 0.004142905276254086, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:46<07:30, 3.66s/it] 77%|███████▋ | 398/520 [24:50<07:25, 3.65s/it] {'loss': 1.2859, 'grad_norm': 0.0042748942133861425, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [24:50<07:25, 3.65s/it] 77%|███████▋ | 399/520 [24:54<07:21, 3.65s/it] {'loss': 1.3316, 'grad_norm': 0.004346789856157071, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [24:54<07:21, 3.65s/it] 77%|███████▋ | 400/520 [24:57<07:19, 3.66s/it] {'loss': 1.4059, 'grad_norm': 0.004253383492608274, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 
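The interleaved "Token indices sequence length is longer than the specified maximum sequence length" messages (2778 > 2048 above, 2076 > 2048 later) are emitted by the slow tokenizer at encode time; they are warnings rather than failures as long as inputs are truncated to model_max_length before reaching the model. A rough illustration of that behavior, with an invented over-long sample:

```python
from transformers import AutoTokenizer

# Illustration only; the sample text is invented. Encoding a sequence past
# model_max_length emits the "Token indices sequence length ..." warning,
# while truncation=True caps input_ids at the limit this run requests (2048).
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", use_fast=False)
tok.model_max_length = 2048  # mirrors --model_max_length 2048

long_text = "lorem ipsum " * 2000
ids = tok(long_text)["input_ids"]                    # warns: len(ids) > 2048
ids = tok(long_text, truncation=True, max_length=2048)["input_ids"]  # len == 2048
```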
77%|███████▋ | 400/520 [24:57<07:19, 3.66s/it] 77%|███████▋ | 401/520 [25:01<07:14, 3.65s/it] {'loss': 1.0945, 'grad_norm': 0.004313794004343395, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:01<07:14, 3.65s/it] 77%|███████▋ | 402/520 [25:05<07:12, 3.66s/it] {'loss': 1.2246, 'grad_norm': 0.00403585516724549, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:05<07:12, 3.66s/it] 78%|███████▊ | 403/520 [25:08<07:14, 3.71s/it] {'loss': 1.2682, 'grad_norm': 0.004427501022350709, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:08<07:14, 3.71s/it] 78%|███████▊ | 404/520 [25:12<07:08, 3.70s/it] {'loss': 1.1693, 'grad_norm': 0.005366969120182613, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:12<07:08, 3.70s/it] 78%|███████▊ | 405/520 [25:16<07:03, 3.69s/it] {'loss': 1.34, 'grad_norm': 0.004067451834619823, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:16<07:03, 3.69s/it] 78%|███████▊ | 406/520 [25:19<06:59, 3.68s/it] {'loss': 1.2732, 'grad_norm': 0.005190181973833374, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:19<06:59, 3.68s/it] 78%|███████▊ | 407/520 [25:23<06:54, 3.67s/it] {'loss': 1.3596, 'grad_norm': 0.004371885972027766, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:23<06:54, 3.67s/it] 78%|███████▊ | 408/520 [25:27<06:49, 3.65s/it] {'loss': 1.2475, 'grad_norm': 0.004670763604547947, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:27<06:49, 3.65s/it] 79%|███████▊ | 409/520 [25:30<06:44, 3.65s/it] {'loss': 1.3845, 'grad_norm': 0.004726831876314855, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:30<06:44, 3.65s/it] 79%|███████▉ | 410/520 [25:34<06:40, 3.64s/it] {'loss': 1.0798, 'grad_norm': 0.003962763433164402, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:34<06:40, 3.64s/it] 79%|███████▉ | 411/520 [25:38<06:37, 3.64s/it] {'loss': 1.3449, 'grad_norm': 0.004615029642198704, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:38<06:37, 3.64s/it] 79%|███████▉ | 412/520 [25:41<06:33, 3.64s/it] {'loss': 1.2628, 'grad_norm': 0.004106484549966809, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:41<06:33, 3.64s/it] 79%|███████▉ | 413/520 [25:45<06:28, 3.63s/it] {'loss': 1.3855, 'grad_norm': 0.004121496268370926, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [25:45<06:28, 3.63s/it] 80%|███████▉ | 414/520 [25:48<06:25, 3.63s/it] {'loss': 1.151, 'grad_norm': 0.0037032197265971676, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [25:48<06:25, 3.63s/it] 80%|███████▉ | 415/520 [25:52<06:21, 3.63s/it] {'loss': 1.2367, 'grad_norm': 0.003970066669663845, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [25:52<06:21, 3.63s/it] 80%|████████ | 416/520 [25:56<06:17, 3.63s/it] {'loss': 1.1492, 'grad_norm': 0.004820906371963647, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [25:56<06:17, 3.63s/it] 80%|████████ | 417/520 [25:59<06:13, 3.63s/it] {'loss': 1.3302, 'grad_norm': 0.004928093515803104, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [25:59<06:13, 3.63s/it] 80%|████████ | 418/520 [26:03<06:10, 3.63s/it] {'loss': 1.3066, 'grad_norm': 
0.003941305640898528, 'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:03<06:10, 3.63s/it] 81%|████████ | 419/520 [26:07<06:07, 3.64s/it] {'loss': 1.2912, 'grad_norm': 0.004413298572010014, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:07<06:07, 3.64s/it] 81%|████████ | 420/520 [26:10<06:04, 3.64s/it] {'loss': 1.1696, 'grad_norm': 0.00449746348811969, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:10<06:04, 3.64s/it] 81%|████████ | 421/520 [26:14<06:00, 3.64s/it] {'loss': 1.0964, 'grad_norm': 0.004578244406163514, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:14<06:00, 3.64s/it] 81%|████████ | 422/520 [26:18<05:56, 3.63s/it] {'loss': 1.2274, 'grad_norm': 0.004661424994225879, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:18<05:56, 3.63s/it] 81%|████████▏ | 423/520 [26:21<05:52, 3.63s/it] {'loss': 1.2315, 'grad_norm': 0.004976305913628542, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:21<05:52, 3.63s/it] 82%|████████▏ | 424/520 [26:25<05:49, 3.65s/it] {'loss': 1.4586, 'grad_norm': 0.004884271684452109, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:25<05:49, 3.65s/it] 82%|████████▏ | 425/520 [26:29<05:48, 3.66s/it] {'loss': 1.2262, 'grad_norm': 0.003923917487192202, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:29<05:48, 3.66s/it] 82%|████████▏ | 426/520 [26:32<05:44, 3.67s/it] {'loss': 1.2569, 'grad_norm': 0.005558273241392103, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:32<05:44, 3.67s/it] 82%|████████▏ | 427/520 [26:36<05:40, 3.67s/it] {'loss': 1.1608, 'grad_norm': 0.004403972808749636, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:36<05:40, 3.67s/it] 82%|████████▏ | 428/520 [26:40<05:37, 3.66s/it] {'loss': 1.1402, 'grad_norm': 0.004426674728043634, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:40<05:37, 3.66s/it] 82%|████████▎ | 429/520 [26:43<05:33, 3.67s/it] {'loss': 1.2384, 'grad_norm': 0.004008077333015796, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:43<05:33, 3.67s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [26:47<05:29, 3.66s/it] {'loss': 1.2297, 'grad_norm': 0.003888048048276099, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [26:47<05:29, 3.66s/it] 83%|████████▎ | 431/520 [26:50<05:25, 3.66s/it] {'loss': 1.3391, 'grad_norm': 0.00466484574605081, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [26:50<05:25, 3.66s/it] 83%|████████▎ | 432/520 [26:54<05:21, 3.65s/it] {'loss': 1.143, 'grad_norm': 0.0048479233137641645, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [26:54<05:21, 3.65s/it] 83%|████████▎ | 433/520 [26:58<05:17, 3.65s/it] {'loss': 1.2825, 'grad_norm': 0.004044819184526079, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [26:58<05:17, 3.65s/it] 83%|████████▎ | 434/520 [27:01<05:14, 3.65s/it] {'loss': 1.014, 'grad_norm': 0.0040512347473904715, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:01<05:14, 3.65s/it] 84%|████████▎ | 435/520 [27:05<05:09, 3.64s/it] {'loss': 1.3183, 'grad_norm': 0.004421254627639748, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:05<05:09, 3.64s/it] 84%|████████▍ | 436/520 [27:09<05:06, 3.64s/it] {'loss': 1.1028, 'grad_norm': 0.0043499053017662884, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:09<05:06, 3.64s/it] 84%|████████▍ | 437/520 [27:12<05:03, 3.66s/it] {'loss': 1.3514, 'grad_norm': 0.004272688744645678, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:12<05:03, 3.66s/it] 84%|████████▍ | 438/520 [27:16<04:59, 3.65s/it] {'loss': 1.1362, 'grad_norm': 0.00404957150478302, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:16<04:59, 3.65s/it] 84%|████████▍ | 439/520 [27:20<04:57, 3.67s/it] {'loss': 1.2988, 'grad_norm': 0.003643447902055983, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:20<04:57, 3.67s/it] 85%|████████▍ | 440/520 [27:24<04:56, 3.70s/it] {'loss': 1.2084, 'grad_norm': 0.00435840627456639, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:24<04:56, 3.70s/it] 85%|████████▍ | 441/520 [27:27<04:54, 3.73s/it] {'loss': 1.3427, 'grad_norm': 0.004220571052624271, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:27<04:54, 3.73s/it] 85%|████████▌ | 442/520 [27:31<04:51, 3.74s/it] {'loss': 1.2535, 'grad_norm': 0.004956350708626266, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:31<04:51, 3.74s/it] 85%|████████▌ | 443/520 [27:35<04:48, 3.75s/it] {'loss': 1.2774, 'grad_norm': 0.004322943485240111, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:35<04:48, 3.75s/it] 85%|████████▌ | 444/520 [27:39<04:45, 3.75s/it] {'loss': 1.2451, 'grad_norm': 0.0037951144743066097, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:39<04:45, 3.75s/it] 86%|████████▌ | 445/520 [27:42<04:41, 3.75s/it] {'loss': 1.1513, 'grad_norm': 0.004120554619692903, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:42<04:41, 3.75s/it] 86%|████████▌ | 446/520 [27:46<04:38, 3.76s/it] {'loss': 1.4133, 'grad_norm': 0.004521993859824105, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [27:46<04:38, 3.76s/it] 86%|████████▌ | 
447/520 [27:50<04:34, 3.76s/it] {'loss': 1.2618, 'grad_norm': 0.004477778670689902, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [27:50<04:34, 3.76s/it] 86%|████████▌ | 448/520 [27:54<04:31, 3.77s/it] {'loss': 1.2258, 'grad_norm': 0.00413063463816767, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [27:54<04:31, 3.77s/it] 86%|████████▋ | 449/520 [27:57<04:27, 3.77s/it] {'loss': 1.3602, 'grad_norm': 0.00439543709154427, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [27:57<04:27, 3.77s/it] 87%|████████▋ | 450/520 [28:01<04:23, 3.76s/it] {'loss': 1.2741, 'grad_norm': 0.004189936517622792, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:01<04:23, 3.76s/it] 87%|████████▋ | 451/520 [28:05<04:19, 3.76s/it] {'loss': 1.2727, 'grad_norm': 0.004342339675924744, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:05<04:19, 3.76s/it] 87%|████████▋ | 452/520 [28:09<04:15, 3.76s/it] {'loss': 1.4094, 'grad_norm': 0.004266387166953322, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:09<04:15, 3.76s/it] 87%|████████▋ | 453/520 [28:12<04:11, 3.76s/it] {'loss': 1.3856, 'grad_norm': 0.005205733725779815, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:12<04:11, 3.76s/it] 87%|████████▋ | 454/520 [28:16<04:08, 3.77s/it] {'loss': 1.1741, 'grad_norm': 0.004246413059170506, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:16<04:08, 3.77s/it] 88%|████████▊ | 455/520 [28:20<04:04, 3.77s/it] {'loss': 1.3085, 'grad_norm': 0.004034929056163388, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:20<04:04, 3.77s/it] 88%|████████▊ | 456/520 [28:24<04:00, 3.76s/it] {'loss': 1.2215, 'grad_norm': 0.004076306233361256, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:24<04:00, 3.76s/it] 88%|████████▊ | 457/520 [28:28<03:56, 3.76s/it] {'loss': 1.3762, 'grad_norm': 0.003864184990723261, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:28<03:56, 3.76s/it] 88%|████████▊ | 458/520 [28:31<03:52, 3.76s/it] {'loss': 1.3777, 'grad_norm': 0.00433646481072187, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:31<03:52, 3.76s/it] 88%|████████▊ | 459/520 [28:35<03:49, 3.76s/it] {'loss': 1.305, 'grad_norm': 0.003939495126569891, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:35<03:49, 3.76s/it] 88%|████████▊ | 460/520 [28:39<03:46, 3.77s/it] {'loss': 1.1642, 'grad_norm': 0.004087001177594256, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:39<03:46, 3.77s/it] 89%|████████▊ | 461/520 [28:43<03:42, 3.76s/it] {'loss': 1.4586, 'grad_norm': 0.0036507216850968083, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:43<03:42, 3.76s/it] 89%|████████▉ | 462/520 [28:46<03:38, 3.77s/it] {'loss': 1.4555, 'grad_norm': 0.004188107351478745, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [28:46<03:38, 3.77s/it] 89%|████████▉ | 463/520 [28:50<03:34, 3.77s/it] {'loss': 1.1254, 'grad_norm': 0.004610056771021924, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [28:50<03:34, 3.77s/it] 89%|████████▉ | 464/520 [28:54<03:31, 3.77s/it] {'loss': 1.2883, 'grad_norm': 0.004380167432887164, 
'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [28:54<03:31, 3.77s/it] 89%|████████▉ | 465/520 [28:58<03:26, 3.76s/it] {'loss': 1.4065, 'grad_norm': 0.004781265694538346, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [28:58<03:26, 3.76s/it] 90%|████████▉ | 466/520 [29:01<03:20, 3.72s/it] {'loss': 1.2676, 'grad_norm': 0.0036852167783444716, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:01<03:20, 3.72s/it] 90%|████████▉ | 467/520 [29:05<03:16, 3.70s/it] {'loss': 1.3268, 'grad_norm': 0.004059398893324473, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:05<03:16, 3.70s/it] 90%|█████████ | 468/520 [29:09<03:10, 3.67s/it] {'loss': 1.2475, 'grad_norm': 0.0045548966016140495, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:09<03:10, 3.67s/it] 90%|█████████ | 469/520 [29:12<03:06, 3.66s/it] {'loss': 1.3016, 'grad_norm': 0.004250851018181237, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:12<03:06, 3.66s/it] 90%|█████████ | 470/520 [29:16<03:02, 3.65s/it] {'loss': 1.1693, 'grad_norm': 0.0036046281298696783, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:16<03:02, 3.65s/it] 91%|█████████ | 471/520 [29:19<02:58, 3.64s/it] {'loss': 1.1963, 'grad_norm': 0.004135051758196268, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:19<02:58, 3.64s/it] 91%|█████████ | 472/520 [29:23<02:54, 3.64s/it] {'loss': 1.1681, 'grad_norm': 0.003930549580575518, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:23<02:54, 3.64s/it] 91%|█████████ | 473/520 [29:27<02:51, 3.64s/it] {'loss': 1.2252, 'grad_norm': 0.004181269221985481, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:27<02:51, 3.64s/it] 91%|█████████ | 474/520 [29:30<02:47, 3.64s/it] {'loss': 1.3628, 'grad_norm': 0.003917742840220859, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:30<02:47, 3.64s/it] 91%|█████████▏| 475/520 [29:34<02:43, 3.64s/it] {'loss': 1.2833, 'grad_norm': 0.0040967799690865125, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:34<02:43, 3.64s/it] 92%|█████████▏| 476/520 [29:38<02:41, 3.66s/it] {'loss': 1.2221, 'grad_norm': 0.004315160961707238, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:38<02:41, 3.66s/it] 92%|█████████▏| 477/520 [29:41<02:38, 3.67s/it] {'loss': 1.2003, 'grad_norm': 0.005015742406939551, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:41<02:38, 3.67s/it] 92%|█████████▏| 478/520 [29:45<02:34, 3.68s/it] {'loss': 1.165, 'grad_norm': 0.0038952311571420197, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [29:45<02:34, 3.68s/it] 92%|█████████▏| 479/520 [29:49<02:30, 3.67s/it] {'loss': 1.3432, 'grad_norm': 0.004616849799120623, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [29:49<02:30, 3.67s/it] 92%|█████████▏| 480/520 [29:52<02:26, 3.67s/it] {'loss': 1.3638, 'grad_norm': 0.004267502206830463, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [29:52<02:26, 3.67s/it] 92%|█████████▎| 481/520 [29:56<02:23, 3.68s/it] {'loss': 1.3743, 'grad_norm': 0.0038766968503044577, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93} + 92%|█████████▎| 481/520 
[29:56<02:23, 3.68s/it] 93%|█████████▎| 482/520 [30:00<02:19, 3.67s/it] {'loss': 1.3757, 'grad_norm': 0.004213408147796581, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:00<02:19, 3.67s/it] 93%|█████████▎| 483/520 [30:03<02:15, 3.67s/it] {'loss': 1.2444, 'grad_norm': 0.004303501090916023, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:03<02:15, 3.67s/it] 93%|█████████▎| 484/520 [30:07<02:12, 3.68s/it] {'loss': 1.246, 'grad_norm': 0.004242984000513978, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:07<02:12, 3.68s/it] 93%|█████████▎| 485/520 [30:11<02:08, 3.67s/it] {'loss': 1.1839, 'grad_norm': 0.003862722250700161, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:11<02:08, 3.67s/it] 93%|█████████▎| 486/520 [30:14<02:04, 3.67s/it] {'loss': 1.3158, 'grad_norm': 0.004508020691612626, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:14<02:04, 3.67s/it] 94%|█████████▎| 487/520 [30:18<02:01, 3.68s/it] {'loss': 1.1637, 'grad_norm': 0.004014905054585106, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:18<02:01, 3.68s/it] 94%|█████████▍| 488/520 [30:22<01:57, 3.67s/it] {'loss': 1.1031, 'grad_norm': 0.004351042431296488, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:22<01:57, 3.67s/it] 94%|█████████▍| 489/520 [30:25<01:53, 3.66s/it] {'loss': 1.3595, 'grad_norm': 0.003651562392181066, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:25<01:53, 3.66s/it] 94%|█████████▍| 490/520 [30:29<01:49, 3.65s/it] {'loss': 1.2395, 'grad_norm': 0.004241832989961942, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:29<01:49, 3.65s/it] 94%|█████████▍| 491/520 [30:33<01:45, 3.65s/it] {'loss': 1.1879, 'grad_norm': 0.0042588367106485125, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:33<01:45, 3.65s/it] 95%|█████████▍| 492/520 [30:36<01:41, 3.63s/it] {'loss': 1.315, 'grad_norm': 0.004410942035234816, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:36<01:41, 3.63s/it] 95%|█████████▍| 493/520 [30:40<01:38, 3.65s/it] {'loss': 1.4263, 'grad_norm': 0.004290511450521242, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:40<01:38, 3.65s/it] 95%|█████████▌| 494/520 [30:44<01:34, 3.64s/it] {'loss': 1.2557, 'grad_norm': 0.0037331248781776304, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [30:44<01:34, 3.64s/it] 95%|█████████▌| 495/520 [30:47<01:30, 3.62s/it] {'loss': 1.2034, 'grad_norm': 0.0040587242129306615, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [30:47<01:30, 3.62s/it] 95%|█████████▌| 496/520 [30:51<01:27, 3.63s/it] {'loss': 1.1199, 'grad_norm': 0.004057605595898738, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [30:51<01:27, 3.63s/it] 96%|█████████▌| 497/520 [30:55<01:23, 3.64s/it] {'loss': 1.283, 'grad_norm': 0.0036752924807354416, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [30:55<01:23, 3.64s/it] 96%|█████████▌| 498/520 [30:58<01:20, 3.64s/it] {'loss': 1.2118, 'grad_norm': 0.0044854718608835645, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [30:58<01:20, 3.64s/it] 96%|█████████▌| 499/520 [31:02<01:16, 
3.64s/it] {'loss': 1.4379, 'grad_norm': 0.004331017497565963, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:02<01:16, 3.64s/it] 96%|█████████▌| 500/520 [31:05<01:12, 3.64s/it] {'loss': 1.3282, 'grad_norm': 0.004974050333603527, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:05<01:12, 3.64s/it] 96%|█████████▋| 501/520 [31:09<01:09, 3.64s/it] {'loss': 1.3929, 'grad_norm': 0.005529716867648379, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:09<01:09, 3.64s/it] 97%|█████████▋| 502/520 [31:13<01:05, 3.63s/it] {'loss': 1.2475, 'grad_norm': 0.003965193519270131, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:13<01:05, 3.63s/it] 97%|█████████▋| 503/520 [31:16<01:01, 3.63s/it] {'loss': 1.3238, 'grad_norm': 0.004309939537899372, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:16<01:01, 3.63s/it] 97%|█████████▋| 504/520 [31:20<00:57, 3.62s/it] {'loss': 1.2408, 'grad_norm': 0.004855088958997596, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:20<00:57, 3.62s/it] 97%|█████████▋| 505/520 [31:24<00:54, 3.66s/it] {'loss': 1.2962, 'grad_norm': 0.004140917941102332, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:24<00:54, 3.66s/it] 97%|█████████▋| 506/520 [31:27<00:51, 3.68s/it] {'loss': 1.1898, 'grad_norm': 0.00438293549468909, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:27<00:51, 3.68s/it] 98%|█████████▊| 507/520 [31:31<00:48, 3.76s/it] {'loss': 1.4793, 'grad_norm': 0.004101469123382733, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:31<00:48, 3.76s/it] 98%|█████████▊| 508/520 [31:35<00:45, 3.77s/it] {'loss': 1.3217, 'grad_norm': 0.004088026471495541, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:35<00:45, 3.77s/it] 98%|█████████▊| 509/520 [31:39<00:41, 3.74s/it] {'loss': 1.2795, 'grad_norm': 0.004004701111744767, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:39<00:41, 3.74s/it] 98%|█████████▊| 510/520 [31:42<00:37, 3.70s/it] {'loss': 1.2432, 'grad_norm': 0.0040283977128747145, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [31:42<00:37, 3.70s/it] 98%|█████████▊| 511/520 [31:46<00:33, 3.68s/it] {'loss': 1.2125, 'grad_norm': 0.003903669805231351, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [31:46<00:33, 3.68s/it] 98%|█████████▊| 512/520 [31:50<00:29, 3.67s/it] {'loss': 1.0935, 'grad_norm': 0.004012919293725019, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [31:50<00:29, 3.67s/it] 99%|█████████▊| 513/520 [31:53<00:25, 3.68s/it] {'loss': 1.2997, 'grad_norm': 0.004402911292748029, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [31:53<00:25, 3.68s/it] 99%|█████████▉| 514/520 [31:57<00:22, 3.74s/it] {'loss': 1.2784, 'grad_norm': 0.003740073653666578, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [31:57<00:22, 3.74s/it] 99%|█████████▉| 515/520 [32:01<00:18, 3.76s/it] {'loss': 1.3346, 'grad_norm': 0.004709732889760893, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:01<00:18, 3.76s/it] 99%|█████████▉| 516/520 [32:05<00:15, 3.78s/it] {'loss': 1.1986, 'grad_norm': 0.0041140382991142555, 
'learning_rate': 3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:05<00:15, 3.78s/it] 99%|█████████▉| 517/520 [32:09<00:11, 3.77s/it] {'loss': 1.401, 'grad_norm': 0.004211807856217579, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:09<00:11, 3.77s/it] 100%|█████████▉| 518/520 [32:12<00:07, 3.77s/it] {'loss': 1.2389, 'grad_norm': 0.004184046598234636, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:12<00:07, 3.77s/it] 100%|█████████▉| 519/520 [32:16<00:03, 3.77s/it] {'loss': 1.3305, 'grad_norm': 0.004252080182918251, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:16<00:03, 3.77s/it] 100%|██████████| 520/520 [32:21<00:00, 4.03s/it] {'loss': 1.4285, 'grad_norm': 0.004284508938924648, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:21<00:00, 4.03s/it] {'train_runtime': 1941.3401, 'train_samples_per_second': 34.27, 'train_steps_per_second': 0.268, 'train_loss': 1.47738057076931, 'epoch': 1.0} + 100%|██████████| 520/520 [32:21<00:00, 4.03s/it] 100%|██████████| 520/520 [32:21<00:00, 3.73s/it] +[2025-10-10 09:14:55,141] [INFO] [launch.py:348:main] Process 707003 exits successfully. +[2025-10-10 09:14:55,141] [INFO] [launch.py:348:main] Process 707004 exits successfully. +[2025-10-10 09:14:56,142] [INFO] [launch.py:348:main] Process 707000 exits successfully. +[2025-10-10 09:14:56,143] [INFO] [launch.py:348:main] Process 707001 exits successfully. +[2025-10-10 09:14:56,143] [INFO] [launch.py:348:main] Process 706998 exits successfully. +[2025-10-10 09:14:56,144] [INFO] [launch.py:348:main] Process 707002 exits successfully. +[2025-10-10 09:14:56,144] [INFO] [launch.py:348:main] Process 706999 exits successfully. +[2025-10-10 09:15:00,149] [INFO] [launch.py:348:main] Process 706997 exits successfully. 
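The closing summary is consistent with the run configuration: 8 local ranks × per_device_train_batch_size 4 × gradient_accumulation_steps 4 gives an effective batch of 128, and 520 optimizer steps over ~1941 s reproduce the reported rates. A quick arithmetic check, with the rank and batch figures taken from the launch arguments:

```python
# Quick check of the summary dict above; 8 ranks, batch size 4 and grad-accum 4
# come from the launch arguments, 520 steps and the runtime from the log.
ranks, per_device, grad_accum, steps = 8, 4, 4, 520
runtime = 1941.3401                                 # 'train_runtime'
effective_batch = ranks * per_device * grad_accum   # 128 samples per optimizer step
print(steps / runtime)                    # ~0.268 -> 'train_steps_per_second'
print(effective_batch * steps / runtime)  # ~34.3  -> 'train_samples_per_second'
# The logged 34.27 is marginally lower because the Trainer divides the actual
# sample count (~0.1 of the 665k mix, i.e. ~66.5k, just under 128 * 520 = 66560).
```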
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.1_2e-1_connector-1.0_1.1_2e-1_ablation_20251010_084100.log
+Timestamp: 2025-10-10 09:15:02
+=====================================
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_060752.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_060752.log
new file mode 100644
index 0000000000000000000000000000000000000000..b13c1782d02c1537fabaae437136ab049c4e26dd
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_060752.log
@@ -0,0 +1,4 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_060752.log
+Timestamp: 2025-10-10 06:07:52
+=====================================
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_091502.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_091502.log
new file mode 100644
index 0000000000000000000000000000000000000000..e533d95b209b37586097630de72e93674da8c032
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_091502.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_091502.log
+Timestamp: 2025-10-10 09:15:02
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 09:15:05,290] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:15:08,106] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
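The "Unable to find hostfile" warning above is DeepSpeed's expected single-node fallback: without a hostfile of `hostname slots=N` entries, it schedules ranks on local GPUs only. Relatedly, the --world_info argument in the relaunch command below is plain base64-encoded JSON describing that topology; a minimal decoding sketch:

```python
import base64
import json

# One-off sketch: the --world_info value handed to deepspeed.launcher.launch
# below is base64-encoded JSON; decoding it gives the single-node, eight-rank
# layout the launcher later echoes as WORLD INFO DICT.
world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.urlsafe_b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
```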
+[2025-10-10 09:15:08,108] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 1.3 --temperature_mlp_text 1.3 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 1.3 --temperature_mlp_vision 1.3 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 1.3 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 09:15:10,727] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:11,844] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 09:15:11,844] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 09:15:11,845] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 09:15:11,845] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 09:15:11,845] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 09:15:11,845] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 09:15:11,845] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 09:15:11,847] [INFO] [launch.py:253:main] process 728927 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:11,849] [INFO] 
[launch.py:253:main] process 728928 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:11,851] [INFO] [launch.py:253:main] process 728929 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:11,853] [INFO] [launch.py:253:main] process 728930 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:11,855] [INFO] [launch.py:253:main] process 728931 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:11,857] [INFO] [launch.py:253:main] process 728932 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:11,859] [INFO] [launch.py:253:main] process 728933 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:11,861] [INFO] [launch.py:253:main] process 728934 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.3', '--temperature_mlp_text', '1.3', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.3', '--temperature_mlp_vision', '1.3', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.3', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 09:15:18,630] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:18,824] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:18,987] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:18,987] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:18,988] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:18,989] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:18,989] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:18,990] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:19,035] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 09:15:19,035] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 09:15:19,227] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 09:15:19,388] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 09:15:19,389] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 09:15:19,389] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 09:15:19,390] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 09:15:19,390] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 09:15:19,392] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.3, 'temperature_mlp': 1.3, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.3, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.3,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.3,
+    "temperature_mlp": 1.3,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:728927:728927 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728927:728927 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728927:728927 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:728927:728927 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:728927:728927 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:728927:728927 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:728932:728932 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:728932:728932 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728932:728932 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728932:728932 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:728932:728932 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:728932:728932 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test2-worker-0:728930:728930 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:728930:728930 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728930:728930 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728930:728930 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:728930:728930 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:728930:728930 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:728934:728934 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:728934:728934 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728934:728934 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728934:728934 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:728934:728934 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:728934:728934 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:728931:728931 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:728931:728931 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728931:728931 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728928:728928 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:728928:728928 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728928:728928 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728931:728931 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:728931:728931 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:728931:728931 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:728928:728928 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:728928:728928 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:728928:728928 [1] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test2-worker-0:728933:728933 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:728933:728933 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728933:728933 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728933:728933 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:728933:728933 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:728933:728933 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:728929:728929 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:728929:728929 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728929:728929 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728929:728929 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:728929:728929 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:728929:728929 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO ncclCommInitRank comm 0x5597182ecc20 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xaae5d503dc4333ee - Init START +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO ncclCommInitRank comm 0x55f9f0ac6220 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xaae5d503dc4333ee - Init START +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO ncclCommInitRank comm 0x55c3fdff9b00 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xaae5d503dc4333ee - Init START +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO ncclCommInitRank comm 0x5636a3191f00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xaae5d503dc4333ee - Init START +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO ncclCommInitRank comm 0x5647df56f7e0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xaae5d503dc4333ee - Init START +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO ncclCommInitRank comm 0x56337d2cfdb0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xaae5d503dc4333ee - Init START +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO ncclCommInitRank comm 0x55b7a83f1b00 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xaae5d503dc4333ee - Init START +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO ncclCommInitRank comm 0x555694630ac0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xaae5d503dc4333ee - Init START +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO 
NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO comm 0x55f9f0ac6220 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO comm 0x55c3fdff9b00 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO comm 0x55b7a83f1b00 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO comm 0x555694630ac0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO comm 0x56337d2cfdb0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO comm 0x5597182ecc20 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO comm 0x5647df56f7e0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO comm 0x5636a3191f00 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 
3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 
7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] 
NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 
06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 10/0 : 0[0] -> 
1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 
[1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO 
Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Channel 23/0 : 
7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 
32 p2p channels per peer +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:728932:730506 [5] NCCL INFO ncclCommInitRank comm 0x5647df56f7e0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xaae5d503dc4333ee - Init COMPLETE +ywang29-vrdb-test2-worker-0:728931:730509 [4] NCCL INFO ncclCommInitRank comm 0x5636a3191f00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xaae5d503dc4333ee - Init COMPLETE +ywang29-vrdb-test2-worker-0:728934:730508 [7] NCCL INFO ncclCommInitRank comm 0x56337d2cfdb0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xaae5d503dc4333ee - Init COMPLETE +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:728929:730512 [2] NCCL INFO ncclCommInitRank comm 0x55c3fdff9b00 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xaae5d503dc4333ee - Init COMPLETE +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:728930:730507 [3] NCCL INFO ncclCommInitRank comm 0x55f9f0ac6220 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xaae5d503dc4333ee - Init COMPLETE +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:728933:730511 [6] NCCL INFO ncclCommInitRank comm 0x5597182ecc20 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xaae5d503dc4333ee - Init COMPLETE +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:728928:730510 [1] NCCL INFO ncclCommInitRank comm 0x55b7a83f1b00 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xaae5d503dc4333ee - Init COMPLETE +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:728927:730489 [0] NCCL INFO ncclCommInitRank comm 0x555694630ac0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xaae5d503dc4333ee - Init COMPLETE +[2025-10-10 09:16:05,780] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores',
'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 
'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 
'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 
'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
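The warning above is expected for this run rather than a configuration error: the masked variant of Qwen2ForCausalLM registers an extra `scores` tensor next to every attention and MLP projection, and since those keys do not exist in the pretrained checkpoint, the loader reports them as missing and leaves them freshly initialized. A minimal PyTorch sketch of the same mechanism (illustrative names, not this repo's code):

```python
# Minimal sketch of why the loader flags the `scores` tensors as newly
# initialized: they exist on the module but not in the pretrained state_dict,
# so a non-strict load leaves them at their fresh initialization.
import torch.nn as nn

layer = nn.Linear(4, 4)
layer.register_parameter("scores", nn.Parameter(layer.weight.detach().clone()))

pretrained = nn.Linear(4, 4).state_dict()   # checkpoint with no "scores" key
result = layer.load_state_dict(pretrained, strict=False)
print(result.missing_keys)                  # ['scores'] -> "newly initialized"
```

The follow-up hint to train the model is accurate here: the freshly initialized `scores` are precisely the tensors this run is about to optimize.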
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 09:20:35,230] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
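In the module tree printed below, every linear projection inside the language model and both connector layers has been replaced by SupermaskLinearSparsity_SoftForward_Normal, while the SigLIP vision tower keeps plain nn.Linear layers. Together with the "Pre-training init ... Mean=1.000000" lines that follow, this suggests each masked layer carries a per-weight score tensor initialized at 1.0 and applied as a soft mask in the forward pass. A rough sketch of such a layer, inferred from the class name and the logged initialization rather than from the actual implementation:

```python
# Speculative sketch of a soft-forward supermask linear layer; init_mean=1.0
# matches the logged "Mean=1.000000", everything else is an assumption.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Frozen linear layer whose weights are gated by a learnable soft mask."""

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=1.0):
        super().__init__(in_features, out_features, bias=bias)
        # One learnable score per weight entry; the log shows Mean=1.000000 at init.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature  # hypothetical knob; lower = harder mask
        # Mask tuning typically freezes the pretrained weights themselves.
        self.weight.requires_grad = False
        if self.bias is not None:
            self.bias.requires_grad = False

    def forward(self, x):
        # "Soft forward": a sigmoid of the scores gives a differentiable mask
        # in (0, 1) that scales each weight instead of pruning it outright.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

# e.g. a drop-in for the 896 -> 4864 gate projection shown in the dump below:
layer = SoftSupermaskLinear(896, 4864, bias=False)
```

Under this reading, optimization moves only the scores, so each layer's effective weight is the pretrained weight scaled elementwise by a mask that training can push toward 0 or 1.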
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=1.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=1.000000
+Pre-training init
language_model.model.layers.0.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init 
language_model.model.layers.6.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=1.000000 +Pre-training 
init language_model.model.layers.12.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: 
Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=1.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=1.000000 +Pre-training init 
connector._connector.0.scores: Mean=1.000005 +Pre-training init connector._connector.2.scores: Mean=0.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-10 09:20:53,704 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-10 09:20:53,709 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4
5 6 7 +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:735632 
[0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 01/0 : 
5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via 
P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0: [NCCL channel setup condensed: each of the 8 local ranks (GPUs 0-7, pids 728927-728934) opens P2P/CUMEM read channels 00/0 through 23/0 to its ring neighbour — 1[1] -> 0[0], 2[2] -> 1[1], 3[3] -> 2[2], 4[4] -> 3[3], 5[5] -> 4[4], 6[6] -> 5[5], 7[7] -> 6[6]; the individual per-channel INFO lines are omitted here]
+ywang29-vrdb-test2-worker-0: [all 8 ranks then report "Connected all trees", threadThresholds 8/8/64 | 64/8/64 | 512 | 512, and "24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer"]
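The NCCL INFO traffic above is what communicator setup for a single 8-GPU node looks like when NCCL debug logging is enabled. A minimal sketch of how output like this is typically produced — assuming a torchrun-style launch with one process per GPU; the script below is illustrative and not part of this repository:

```python
# Sketch: trigger NCCL's channel/tree setup logging on an 8-GPU node.
# Assumes launch via `torchrun --nproc_per_node=8 this_script.py`
# (hypothetical file name), which sets LOCAL_RANK and the rendezvous env vars.
import os
import torch
import torch.distributed as dist

os.environ.setdefault("NCCL_DEBUG", "INFO")        # emits the INFO lines seen above
# os.environ["NCCL_DEBUG_SUBSYS"] = "INIT,GRAPH"   # optional: narrow the output

def main():
    local_rank = int(os.environ["LOCAL_RANK"])     # provided by the launcher
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")        # one rank per GPU

    # The first collective lazily builds the communicator; this is where the
    # "Channel .. via P2P/CUMEM/read" and "ncclCommInitRank ... Init COMPLETE"
    # lines get printed.
    x = torch.ones(1, device=f"cuda:{local_rank}")
    dist.all_reduce(x)
    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```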
+ywang29-vrdb-test2-worker-0:728928:735633 [1] NCCL INFO ncclCommInitRank comm 0x7efd0406ad30 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x486454945dcf6e2f - Init COMPLETE
+ywang29-vrdb-test2-worker-0:728932:735639 [5] NCCL INFO ncclCommInitRank comm 0x7f2dd806a7f0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x486454945dcf6e2f - Init COMPLETE
+ywang29-vrdb-test2-worker-0:728929:735636 [2] NCCL INFO ncclCommInitRank comm 0x7fdb3406b0d0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x486454945dcf6e2f - Init COMPLETE
+ywang29-vrdb-test2-worker-0:728933:735637 [6] NCCL INFO ncclCommInitRank comm 0x7f5f9406ab70 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x486454945dcf6e2f - Init COMPLETE
+ywang29-vrdb-test2-worker-0:728931:735638 [4] NCCL INFO ncclCommInitRank comm 0x7f5b5406a660 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x486454945dcf6e2f - Init COMPLETE
+ywang29-vrdb-test2-worker-0:728927:735632 [0] NCCL INFO ncclCommInitRank comm 0x7fb15406ad40 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x486454945dcf6e2f - Init COMPLETE
+ywang29-vrdb-test2-worker-0:728934:735635 [7] NCCL INFO ncclCommInitRank comm 0x7f473806a370 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x486454945dcf6e2f - Init COMPLETE
+ywang29-vrdb-test2-worker-0:728930:735634 [3] NCCL INFO ncclCommInitRank comm 0x7fe3a406ad10 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x486454945dcf6e2f - Init COMPLETE
+[duplicate tqdm progress-bar redraws removed below; each training step is shown once as its progress fragment plus metrics dict]
+ 0%| | 1/520 [00:14<2:01:22, 14.03s/it] {'loss': 6.9171, 'grad_norm': 0.4488351869156624, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:08:20, 7.92s/it] {'loss': 6.1731, 'grad_norm': 0.4519350910395735, 'learning_rate': 0.025, 'epoch': 0.0}
+ 1%| | 3/520 [00:21<51:18, 5.95s/it] {'loss': 4.7827, 'grad_norm': 0.21626645491016777, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 4/520 [00:24<43:16, 5.03s/it] {'loss': 3.1464, 'grad_norm': 0.18750443653935206, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 5/520 [00:28<38:53, 4.53s/it] {'loss': 3.0379, 'grad_norm': 0.12205388588639712, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 6/520 [00:32<36:28, 4.26s/it] {'loss': 2.6633, 'grad_norm': 0.0878978640341176, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:35<34:38, 4.05s/it] {'loss': 1.9921, 'grad_norm': 0.04847386185086132, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:40<35:10, 4.12s/it] {'loss': 2.1517, 'grad_norm': 0.033736773714034905, 'learning_rate': 0.1, 'epoch': 0.02}
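From here on the log is dominated by lines of this shape: a tqdm progress fragment followed by a Python dict of metrics (loss, grad_norm, learning_rate, epoch). Since the payloads are valid dict literals, a small, hypothetical helper can pull them out of a log file like this one for plotting — the file name below is a placeholder:

```python
# Sketch: extract the per-step metric dicts from a training log like this one.
import ast
import re

STEP_RE = re.compile(r"\{'loss':[^}]*\}")  # the dicts contain no nested braces

def read_metrics(path):
    """Return the per-step dicts ('loss', 'grad_norm', 'learning_rate', 'epoch') in order."""
    with open(path, encoding="utf-8", errors="replace") as f:
        return [ast.literal_eval(m) for line in f for m in STEP_RE.findall(line)]

metrics = read_metrics("qwen2.5-0_5b_base_masktune_..._20251010_055757.log")  # placeholder name
print(len(metrics), metrics[0]["loss"], metrics[-1]["learning_rate"])
```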
+ 2%|▏ | 9/520 [00:44<35:12, 4.13s/it] {'loss': 2.065, 'grad_norm': 0.020543113850053175, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:47<33:54, 3.99s/it] {'loss': 1.7533, 'grad_norm': 0.022584501054669203, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:51<33:18, 3.93s/it] {'loss': 1.9227, 'grad_norm': 0.04497852715489127, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:55<32:44, 3.87s/it] {'loss': 2.2677, 'grad_norm': 0.06303859195702409, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-10 09:21:58,051] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
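The stage3.py warning above is DeepSpeed reporting that the PyTorch caching allocator had to flush mid-step under memory pressure. A sketch of the mitigation the warning itself names, get_accelerator().empty_cache(), inside a schematic DeepSpeed loop — model_engine, train_dataloader, and the flush cadence are assumptions, not values from this run:

```python
# Sketch of the mitigation suggested by the stage3.py warning: flush the
# caching allocator on every rank at the same cadence, so flushes happen
# together rather than unpredictably mid-step. Schematic loop, not the
# actual tinyllava/train/train.py code.
from deepspeed.accelerator import get_accelerator

def train_epoch(model_engine, train_dataloader, flush_every=50):
    """One epoch with periodic, rank-synchronized allocator flushes.

    model_engine: engine returned by deepspeed.initialize(...) (assumed).
    flush_every: hypothetical cadence; tune to how often the warning fires.
    """
    for step, batch in enumerate(train_dataloader):
        loss = model_engine(batch)           # forward pass through the engine
        model_engine.backward(loss)          # DeepSpeed-managed backward
        model_engine.step()                  # optimizer + ZeRO partitioning step
        if step % flush_every == 0:
            get_accelerator().empty_cache()  # what the warning advises
```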
+ 2%|▎ | 13/520 [00:59<34:07, 4.04s/it] {'loss': 1.9489, 'grad_norm': 0.04534677816025206, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:03<33:28, 3.97s/it] {'loss': 1.9032, 'grad_norm': 0.019143138391533945, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:07<32:40, 3.88s/it] {'loss': 2.1191, 'grad_norm': 0.02439662686853461, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:11<32:03, 3.82s/it] {'loss': 2.0004, 'grad_norm': 0.019902671223524376, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:14<31:50, 3.80s/it] {'loss': 1.9232, 'grad_norm': 0.017580473725257312, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
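The logged learning rates are consistent with linear warmup to the 0.2 peak over the first 16 steps (0.0125 per step), then cosine decay over the remaining 504 of 520 steps: step 17 logs 0.1999980572931538 = 0.2 * 0.5 * (1 + cos(pi/504)), and step 268, the midpoint of the decay, logs exactly 0.1 (see the representative rows below). A sketch of that rule; the 1-based step indexing (lr recorded after each optimizer step) is an assumption:

```python
# Sketch: the warmup + cosine schedule implied by the logged learning rates
# (the standard Hugging Face cosine_with_warmup shape).
import math

PEAK_LR, WARMUP, TOTAL = 0.2, 16, 520

def lr_at(step: int) -> float:
    if step <= WARMUP:
        return PEAK_LR * step / WARMUP                      # linear warmup
    progress = (step - WARMUP) / (TOTAL - WARMUP)           # 0 -> 1 after warmup
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))    # 0.0125              (matches step 1)
print(lr_at(16))   # 0.2                 (peak, matches step 16)
print(lr_at(17))   # 0.19999805729...    (matches step 17)
print(lr_at(268))  # ~0.1 (cosine midpoint; step 268 logs exactly 0.1)
```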
+[steps 18-304 elided for brevity: the run settles at ~3.6-3.9 s/it, with loss drifting from ~1.9 down to the 1.3-1.5 range as the cosine schedule decays the learning rate; representative logged steps:]
+ 10%|▉ | 50/520 [03:17<29:30, 3.77s/it] {'loss': 1.6878, 'grad_norm': 0.009107200354936654, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+ 19%|█▉ | 100/520 [06:21<26:20, 3.76s/it] {'loss': 1.7514, 'grad_norm': 0.012751589959677893, 'learning_rate': 0.1866025403784439, 'epoch': 0.19}
+ 29%|██▉ | 150/520 [09:27<22:30, 3.65s/it] {'loss': 1.6497, 'grad_norm': 0.005632247373066391, 'learning_rate': 0.16709814279859703, 'epoch': 0.29}
+ 38%|███▊ | 200/520 [12:33<19:34, 3.67s/it] {'loss': 1.4936, 'grad_norm': 0.004993100568620941, 'learning_rate': 0.14112871031306118, 'epoch': 0.38}
+ 48%|████▊ | 250/520 [15:41<16:41, 3.71s/it] {'loss': 1.3764, 'grad_norm': 0.004884067031537341, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 52%|█████▏ | 268/520 [16:49<16:13, 3.86s/it] {'loss': 1.6457, 'grad_norm': 0.0069126309000922944, 'learning_rate': 0.1, 'epoch': 0.52}
+ 58%|█████▊ | 300/520 [18:47<13:25, 3.66s/it] {'loss': 1.4315, 'grad_norm': 0.004163380661079323, 'learning_rate': 0.08018538568006027, 'epoch': 0.58}
+ 59%|█████▊ | 305/520 [19:05<13:07, 3.66s/it] {'loss': 1.4516,
'grad_norm': 0.004820780271019366, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:05<13:07, 3.66s/it] 59%|█████▉ | 306/520 [19:09<13:03, 3.66s/it] {'loss': 1.376, 'grad_norm': 0.00413836531886086, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:09<13:03, 3.66s/it] 59%|█████▉ | 307/520 [19:13<13:18, 3.75s/it] {'loss': 1.3201, 'grad_norm': 0.003933490665359713, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:13<13:18, 3.75s/it] 59%|█████▉ | 308/520 [19:16<13:08, 3.72s/it] {'loss': 1.4318, 'grad_norm': 0.004223247099164695, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:16<13:08, 3.72s/it] 59%|█████▉ | 309/520 [19:20<12:59, 3.69s/it] {'loss': 1.3005, 'grad_norm': 0.0039240875431060555, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:20<12:59, 3.69s/it] 60%|█████▉ | 310/520 [19:24<12:51, 3.67s/it] {'loss': 1.2829, 'grad_norm': 0.004130893067526182, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:24<12:51, 3.67s/it] 60%|█████▉ | 311/520 [19:27<12:46, 3.67s/it] {'loss': 1.2468, 'grad_norm': 0.0041869487103205595, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:27<12:46, 3.67s/it] 60%|██████ | 312/520 [19:31<12:40, 3.66s/it] {'loss': 1.2354, 'grad_norm': 0.004645691245132635, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:31<12:40, 3.66s/it] 60%|██████ | 313/520 [19:35<12:36, 3.66s/it] {'loss': 1.2276, 'grad_norm': 0.003830790399280937, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:35<12:36, 3.66s/it] 60%|██████ | 314/520 [19:39<13:04, 3.81s/it] {'loss': 1.266, 'grad_norm': 0.0040777179439220115, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [19:39<13:04, 3.81s/it] 61%|██████ | 315/520 [19:42<12:48, 3.75s/it] {'loss': 1.5078, 'grad_norm': 0.006946344710976559, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [19:42<12:48, 3.75s/it] 61%|██████ | 316/520 [19:46<13:01, 3.83s/it] {'loss': 1.2308, 'grad_norm': 0.005207205069951875, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [19:46<13:01, 3.83s/it] 61%|██████ | 317/520 [19:50<12:45, 3.77s/it] {'loss': 1.2587, 'grad_norm': 0.003948479996275642, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [19:50<12:45, 3.77s/it] 61%|██████ | 318/520 [19:54<12:32, 3.73s/it] {'loss': 1.4019, 'grad_norm': 0.004520884610688344, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [19:54<12:32, 3.73s/it] 61%|██████▏ | 319/520 [19:58<12:57, 3.87s/it] {'loss': 1.2556, 'grad_norm': 0.004712967119592113, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 319/520 [19:58<12:57, 3.87s/it] 62%|██████▏ | 320/520 [20:02<12:51, 3.86s/it] {'loss': 1.1948, 'grad_norm': 0.004419079824982413, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:02<12:51, 3.86s/it] 62%|██████▏ | 321/520 [20:06<12:44, 3.84s/it] {'loss': 1.4079, 'grad_norm': 0.0043690511896973165, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:06<12:44, 3.84s/it] 62%|██████▏ | 322/520 [20:09<12:36, 3.82s/it] {'loss': 1.3201, 'grad_norm': 0.004912108908976708, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:09<12:36, 3.82s/it] 62%|██████▏ | 323/520 [20:13<12:20, 3.76s/it] {'loss': 1.4095, 
'grad_norm': 0.005018822907626857, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:13<12:20, 3.76s/it] 62%|██████▏ | 324/520 [20:17<12:09, 3.72s/it] {'loss': 1.3299, 'grad_norm': 0.005290716165538675, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:17<12:09, 3.72s/it] 62%|██████▎ | 325/520 [20:20<12:00, 3.70s/it] {'loss': 1.3547, 'grad_norm': 0.004539198851891356, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:20<12:00, 3.70s/it] 63%|██████▎ | 326/520 [20:24<11:56, 3.69s/it] {'loss': 1.3226, 'grad_norm': 0.004439409525507857, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:24<11:56, 3.69s/it] 63%|██████▎ | 327/520 [20:28<12:03, 3.75s/it] {'loss': 1.4911, 'grad_norm': 0.006232628109764156, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:28<12:03, 3.75s/it] 63%|██████▎ | 328/520 [20:32<12:06, 3.79s/it] {'loss': 1.4106, 'grad_norm': 0.00440040625937516, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:32<12:06, 3.79s/it] 63%|██████▎ | 329/520 [20:35<12:08, 3.81s/it] {'loss': 1.2332, 'grad_norm': 0.003577173906725324, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:35<12:08, 3.81s/it] 63%|██████▎ | 330/520 [20:39<12:09, 3.84s/it] {'loss': 1.3126, 'grad_norm': 0.004078284641365743, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:39<12:09, 3.84s/it] 64%|██████▎ | 331/520 [20:43<12:07, 3.85s/it] {'loss': 1.2853, 'grad_norm': 0.004266630333586245, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:43<12:07, 3.85s/it] 64%|██████▍ | 332/520 [20:47<12:07, 3.87s/it] {'loss': 1.4827, 'grad_norm': 0.0045657873280982916, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:47<12:07, 3.87s/it] 64%|██████▍ | 333/520 [20:51<12:04, 3.87s/it] {'loss': 1.4584, 'grad_norm': 0.0045790329661468555, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:51<12:04, 3.87s/it] 64%|██████▍ | 334/520 [20:55<12:01, 3.88s/it] {'loss': 1.3313, 'grad_norm': 0.00460353003922573, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:55<12:01, 3.88s/it] 64%|██████▍ | 335/520 [20:59<11:59, 3.89s/it] {'loss': 1.3244, 'grad_norm': 0.004096727662884589, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [20:59<11:59, 3.89s/it] 65%|██████▍ | 336/520 [21:03<11:54, 3.89s/it] {'loss': 1.1995, 'grad_norm': 0.004979072765616342, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:03<11:54, 3.89s/it] 65%|██████▍ | 337/520 [21:07<11:51, 3.89s/it] {'loss': 1.2052, 'grad_norm': 0.0044930091221071205, 'learning_rate': 0.058303921723867225, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:07<11:51, 3.89s/it] 65%|██████▌ | 338/520 [21:11<11:47, 3.89s/it] {'loss': 1.3526, 'grad_norm': 0.004780331818140722, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:11<11:47, 3.89s/it] 65%|██████▌ | 339/520 [21:14<11:42, 3.88s/it] {'loss': 1.2772, 'grad_norm': 0.004246207807542125, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:14<11:42, 3.88s/it] 65%|██████▌ | 340/520 [21:18<11:38, 3.88s/it] {'loss': 1.2605, 'grad_norm': 0.004075955133252654, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:18<11:38, 3.88s/it] 66%|██████▌ | 341/520 
[21:22<11:22, 3.81s/it] {'loss': 1.2893, 'grad_norm': 0.004317806735597507, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:22<11:22, 3.81s/it] 66%|██████▌ | 342/520 [21:26<11:08, 3.75s/it] {'loss': 1.4469, 'grad_norm': 0.0050813124590218355, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:26<11:08, 3.75s/it] 66%|██████▌ | 343/520 [21:29<11:01, 3.74s/it] {'loss': 1.4343, 'grad_norm': 0.005529998555021089, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:29<11:01, 3.74s/it] 66%|██████▌ | 344/520 [21:33<10:55, 3.72s/it] {'loss': 1.2262, 'grad_norm': 0.004413163198210336, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:33<10:55, 3.72s/it] 66%|██████▋ | 345/520 [21:37<10:48, 3.71s/it] {'loss': 1.3645, 'grad_norm': 0.004802472276533664, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:37<10:48, 3.71s/it] 67%|██████▋ | 346/520 [21:40<10:42, 3.69s/it] {'loss': 1.3934, 'grad_norm': 0.004713761057408822, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:40<10:42, 3.69s/it] 67%|██████▋ | 347/520 [21:44<10:37, 3.69s/it] {'loss': 1.252, 'grad_norm': 0.004222408357822683, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:44<10:37, 3.69s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:48<10:32, 3.68s/it] {'loss': 1.2177, 'grad_norm': 0.005264308322704792, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:48<10:32, 3.68s/it] 67%|██████▋ | 349/520 [21:51<10:27, 3.67s/it] {'loss': 1.2604, 'grad_norm': 0.004381820981595776, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:51<10:27, 3.67s/it] 67%|██████▋ | 350/520 [21:55<10:22, 3.66s/it] {'loss': 1.2914, 'grad_norm': 0.004633796293981259, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [21:55<10:22, 3.66s/it] 68%|██████▊ | 351/520 [21:59<10:19, 3.66s/it] {'loss': 1.2016, 'grad_norm': 0.004171808623282944, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [21:59<10:19, 3.66s/it] 68%|██████▊ | 352/520 [22:02<10:25, 3.72s/it] {'loss': 1.3216, 'grad_norm': 0.00421528470659356, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:02<10:25, 3.72s/it] 68%|██████▊ | 353/520 [22:06<10:30, 3.78s/it] {'loss': 1.3364, 'grad_norm': 0.003654264594412709, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:06<10:30, 3.78s/it] 68%|██████▊ | 354/520 [22:10<10:33, 3.82s/it] {'loss': 1.4935, 'grad_norm': 0.004474528265480774, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:10<10:33, 3.82s/it] 68%|██████▊ | 355/520 [22:14<10:31, 3.83s/it] {'loss': 1.2668, 'grad_norm': 0.003991219998018151, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:14<10:31, 3.83s/it] 68%|██████▊ | 356/520 [22:18<10:30, 3.84s/it] {'loss': 1.2587, 'grad_norm': 0.004339308701635785, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:18<10:30, 3.84s/it] 69%|██████▊ | 357/520 [22:22<10:28, 3.86s/it] {'loss': 1.2751, 'grad_norm': 0.0036490076925400107, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:22<10:28, 3.86s/it] 
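The warning above is emitted by the Hugging Face tokenizer whenever a raw sample encodes to more token ids than the model_max_length of 2048 set on the command line; encode() itself does not truncate, so the training pipeline is expected to clip the sequence downstream. A minimal sketch that reproduces the warning (the repeated-word input is hypothetical; in this run the trigger is a long conversation sample in the mix665k data):

from transformers import AutoTokenizer

# Slow tokenizer, matching --tokenizer_use_fast False and --model_max_length 2048.
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", model_max_length=2048, use_fast=False)
ids = tok.encode("word " * 3000)    # > 2048 ids -> logs the warning seen above
print(len(ids))                     # nothing was truncated at encode time
ids = ids[: tok.model_max_length]   # the dataset code must clip to 2048 itself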
+[... per-step Trainer logs for steps 358-429/520 elided: 'loss' fluctuates between ~1.08 and ~1.54, 'grad_norm' stays in the ~0.0036-0.0056 range, and 'learning_rate' continues the cosine decay from 0.04680 (step 358, epoch 0.69) to 0.01566 (step 429, epoch 0.82) ...]
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+[... per-step logs for steps 430-513 elided: 'loss' drifts down into a ~1.01-1.47 band (minimum 1.0063 at step 434), and 'learning_rate' decays from 0.015328 to 9.5178e-05 as the schedule approaches zero ...]
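Throughout these rows the 'learning_rate' column traces the configured cosine schedule (--lr_scheduler_type cosine, peak --learning_rate 2e-1, --warmup_ratio 0.03 of 520 steps, i.e. 16 warmup steps after rounding up). A small sketch of that schedule, assuming the usual linear warmup followed by half-cosine decay to zero, reproduces the logged values exactly:

import math

def cosine_lr(step, base_lr=2e-1, total_steps=520, warmup_steps=16):
    # Linear warmup, then half-cosine decay to zero.
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

assert abs(cosine_lr(268) - 0.1) < 1e-12    # log at step 268: 'learning_rate': 0.1
assert abs(cosine_lr(352) - 0.05) < 1e-12   # log at step 352: 0.050000000000000024
assert abs(cosine_lr(520) - 0.0) < 1e-12    # log at step 520: 'learning_rate': 0.0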
+[... steps 514-519 elided: 'loss' ~1.19-1.38, 'learning_rate' falling from 6.9930e-05 to 1.9427e-06 ...]
+100%|██████████| 520/520 [32:37<00:00, 3.87s/it] {'loss': 1.4125, 'grad_norm': 0.00412952087228812, 'learning_rate': 0.0, 'epoch': 1.0}
+{'train_runtime': 1957.8174, 'train_samples_per_second': 33.981, 'train_steps_per_second': 0.266, 'train_loss': 1.4771966047011889, 'epoch': 1.0}
+100%|██████████| 520/520 [32:37<00:00, 3.76s/it]
+[2025-10-10 09:53:41,326] [INFO] [launch.py:348:main] Process 728928 exits successfully.
+[2025-10-10 09:53:42,328] [INFO] [launch.py:348:main] Process 728930 exits successfully.
+[2025-10-10 09:53:42,329] [INFO] [launch.py:348:main] Process 728934 exits successfully.
+[2025-10-10 09:53:42,329] [INFO] [launch.py:348:main] Process 728933 exits successfully.
+[2025-10-10 09:53:42,330] [INFO] [launch.py:348:main] Process 728932 exits successfully.
+[2025-10-10 09:53:43,330] [INFO] [launch.py:348:main] Process 728931 exits successfully.
+[2025-10-10 09:53:43,331] [INFO] [launch.py:348:main] Process 728929 exits successfully.
+[2025-10-10 09:53:50,339] [INFO] [launch.py:348:main] Process 728927 exits successfully.
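The closing summary is consistent with the launch configuration: 8 ranks x 4 per-device batch x 4 gradient-accumulation steps is a global batch of 128 sequences per optimizer step, and --train_data_ratio 0.1 of the ~665k-sample llava_v1_5_mix665k mix is roughly 66.5k samples, which is why the run lasts 520 steps. A quick sanity check of the reported rates (the exact sample count is inferred back from the summary itself):

ranks, per_device_bs, grad_accum = 8, 4, 4
global_batch = ranks * per_device_bs * grad_accum   # 128 sequences per optimizer step

runtime_s = 1957.8174                 # 'train_runtime'
samples = round(33.981 * runtime_s)   # ~66,529 samples, i.e. ~0.1 of mix665k
steps = -(-samples // global_batch)   # ceiling division -> 520, matching 520/520

print(samples / runtime_s)            # ~33.98   ('train_samples_per_second': 33.981)
print(steps / runtime_s)              # ~0.2656  ('train_steps_per_second': 0.266)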
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.3_2e-1_connector-1.0_1.3_2e-1_ablation_20251010_091502.log
+Timestamp: 2025-10-10 09:53:52
+=====================================
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation_20251010_095352.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation_20251010_095352.log
new file mode 100644
index 0000000000000000000000000000000000000000..edb0b72f6229d22936868b4b46530c2dbcf3fec6
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation_20251010_095352.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation_20251010_095352.log
+Timestamp: 2025-10-10 09:53:52
+=====================================
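The relaunch recorded below hands the GPU topology to deepspeed.launcher.launch as base64-encoded JSON. Decoding the --world_info value from the command (standard library only) yields the single-node, eight-rank layout that launch.py later prints as its WORLD INFO DICT:

import base64, json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} -> one node, local GPU ranks 0-7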
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 09:53:55,481] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:53:58,404] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-10 09:53:58,406] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 1.5 --temperature_mlp_text 1.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 1.5 --temperature_mlp_vision 1.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 1.5 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 09:54:01,001] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:02,083] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 09:54:02,083] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 09:54:02,083] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 09:54:02,083] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 09:54:02,083] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 09:54:02,083] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 09:54:02,083] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 09:54:02,085] [INFO] [launch.py:253:main] process 751344 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1',
+[... launch.py:253:main spawn lines for processes 751345-751351 (--local_rank=1 through --local_rank=7, 09:54:02,088-09:54:02,100) omitted: each command is identical to the rank-0 command above except for its '--local_rank' value ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[... the same pynvml FutureWarning emitted once by each of the eight spawned ranks ...]
+[2025-10-10 09:54:08,677] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[... the same auto-detect line logged by the remaining seven ranks between 09:54:08,776 and 09:54:09,088 ...]
+[2025-10-10 09:54:09,085] [INFO] [comm.py:637:init_distributed] cdb=None
+[... cdb=None logged once per rank between 09:54:09,085 and 09:54:09,497 ...]
+[2025-10-10 09:54:09,479] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+[... repeated once per rank, interleaved with the huggingface_hub deprecation warning below ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.5, 'temperature_mlp': 1.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
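The dict above is the mask configuration each rank echoes: soft masks at temperature 1.5 on the LLM and connector, initialized at mean 1.0. A "soft" mask with a temperature is conventionally a sigmoid gate over learnable per-unit scores; a small sketch of that reading (an assumption about the mechanism, not code from this repo):

```python
# Sketch of a temperature-controlled soft mask (assumed mechanism, not the
# repo's implementation): a learnable score per unit passes through a
# sigmoid whose sharpness the temperature controls.
import torch

def soft_mask(scores: torch.Tensor, temperature: float) -> torch.Tensor:
    # Low temperature -> nearly binary gate; high temperature -> smooth gate.
    return torch.sigmoid(scores / temperature)

scores = torch.full((6,), 1.0, requires_grad=True)  # matches init_mean = 1.0
print(soft_mask(scores, temperature=1.5))  # the temperature used in this run
```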
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.5,
+    "temperature_mlp": 1.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+[... the special-tokens notice, the TypedStorage UserWarning, and the Flash Attention 2.0 warning each repeat once per rank ...]
+ywang29-vrdb-test2-worker-0:751344:751344 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:751344:751344 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:751344:751344 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:751344:751344 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:751344:751344 [0] NCCL INFO NET/Plugin: Using internal network plugin.
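From the config dump: the connector is `mlp2x_gelu`, bridging the SigLIP feature width (`vision_hidden_size`: 1152) to the Qwen2.5-0.5B width (`hidden_size`: 896). A sketch of the shape that name conventionally implies, a two-layer MLP with a GELU in between (illustrative; the repo's actual module and layer names may differ):

```python
# Assumed shape of the mlp2x_gelu connector from the config above;
# illustrative only, not the repo's actual class.
import torch
import torch.nn as nn

connector = nn.Sequential(
    nn.Linear(1152, 896),  # vision_hidden_size -> LLM hidden_size
    nn.GELU(),
    nn.Linear(896, 896),   # second projection at LLM width
)

# 729 patch tokens is the usual count for siglip-so400m-patch14-384 (27x27 grid).
vision_tokens = torch.randn(1, 729, 1152)
print(connector(vision_tokens).shape)  # torch.Size([1, 729, 896])
```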
+ywang29-vrdb-test2-worker-0:751344:751344 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+[... ranks 1-7 (processes 751345-751351) log the same NCCL bootstrap sequence: NCCL_SOCKET_IFNAME=eth, Bootstrap via eth0:10.200.152.48<0>, no libnccl-net.so plugin found, internal network plugin used ...]
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Using network Socket
+[... each remaining rank likewise finds no InfiniBand device and falls back to the Socket transport ...]
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO ncclCommInitRank comm 0x560b7a1aeca0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xb8f19aaa832b5cd6 - Init START
+[... Init START for ranks 1-7 joins the same communicator (commId 0xb8f19aaa832b5cd6); CPU affinity is set per GPU and NVLS multicast is unavailable on all eight devices ...]
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO comm 0x560b7a1aeca0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+[... channels 01/24 through 23/24 use the same ring order 0 1 2 3 4 5 6 7; each rank reports the matching chain trees and sets P2P Chunksize to 524288 ...]
+ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+[... the remaining per-channel peer connections, each rank to its ring successor, are all established via P2P/CUMEM/read ...]
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Connected all rings +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 
p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO Connected all trees +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:751346:752961 [2] NCCL INFO ncclCommInitRank comm 0x563ac8c919e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xb8f19aaa832b5cd6 - Init COMPLETE +ywang29-vrdb-test2-worker-0:751349:752964 [5] NCCL INFO ncclCommInitRank comm 0x564c70c2ab90 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xb8f19aaa832b5cd6 - Init COMPLETE +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:751345:752965 [1] NCCL INFO ncclCommInitRank comm 0x55d8eab711f0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xb8f19aaa832b5cd6 - Init COMPLETE +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:751347:752967 [3] NCCL INFO ncclCommInitRank comm 0x559fbdb4d160 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xb8f19aaa832b5cd6 - Init COMPLETE +ywang29-vrdb-test2-worker-0:751350:752962 [6] NCCL INFO ncclCommInitRank comm 0x5648546b91c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xb8f19aaa832b5cd6 - Init COMPLETE +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test2-worker-0:751351:752963 [7] NCCL INFO ncclCommInitRank comm 0x55644bd4b4e0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xb8f19aaa832b5cd6 - Init COMPLETE +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test2-worker-0:751348:752966 [4] NCCL INFO ncclCommInitRank comm 0x55fd29eaf990 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xb8f19aaa832b5cd6 - Init COMPLETE +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test2-worker-0:751344:752960 [0] NCCL INFO ncclCommInitRank comm 0x560b7a1aeca0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xb8f19aaa832b5cd6 - Init COMPLETE +[2025-10-10 09:54:51,887] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 09:54:53,638] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
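The "newly initialized" warning above (repeated verbatim by each of the eight training ranks, and by the "loading language model" / "Loading vision tower" / "Loading connector" messages that follow) is expected rather than a sign of a broken checkpoint: the masktune recipe attaches a learnable `scores` tensor to every masked linear layer, and those tensors cannot exist in the vanilla pretrained weights, so `from_pretrained` reports them as newly initialized. Consistent with this, the trainable-parameter list further down contains only `.scores` entries. A minimal PyTorch sketch of such a layer — a hypothetical stand-in for `SupermaskLinearSparsity_SoftForward_Normal`, whose name appears to encode the launch flags `mask_type=soft` and `backward_type=normal`; the sigmoid form of the soft mask and the frozen base weights are assumptions, not confirmed implementation details:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftMaskedLinear(nn.Linear):
        """Hypothetical sketch of a soft-forward supermask linear layer."""

        def __init__(self, in_features, out_features, bias=True,
                     init_mean=1.0, temperature=0.3):
            super().__init__(in_features, out_features, bias=bias)
            # Extra per-weight parameter that a vanilla checkpoint lacks,
            # which is why from_pretrained() flags it as newly initialized.
            self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
            self.temperature = temperature
            # Assumed: only the scores train; the base weights stay frozen,
            # matching a trainable-parameter list made up solely of `.scores`.
            self.weight.requires_grad_(False)
            if self.bias is not None:
                self.bias.requires_grad_(False)

        def forward(self, x):
            # Soft mask in (0, 1); a lower temperature sharpens it toward 0/1.
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)

Under this sketch, scores initialized around 1.0 (as the "Pre-training init ... Mean=1.000000" lines below confirm) with the logged temperature of 0.3 give an initial mask value of sigmoid(1/0.3) ≈ 0.97, so training starts from an almost fully open network.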
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.{0-23}.{self_attn.{q,k,v,o}_proj, mlp.{gate,up,down}_proj}.scores: Mean=1.000000 for all 168 language-model score tensors
+Pre-training init connector._connector.0.scores: Mean=1.000005
+Pre-training init connector._connector.2.scores: Mean=0.999970
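The shapes in the architecture dump above are enough to reconstruct the trainable-parameter total reported just below. The following sanity check is plain arithmetic over the logged shapes (24 decoder layers, hidden size 896, KV projection dim 128, MLP dim 4864, and the two masked connector linears), with the 10% subsample size included:

    # Only the `.scores` tensors are trainable in this masktune run.
    hidden, kv, inter = 896, 128, 4864          # Qwen2.5-0.5B shapes from the dump

    per_layer = (2 * hidden * hidden            # q_proj + o_proj scores
                 + 2 * kv * hidden              # k_proj + v_proj scores
                 + 3 * hidden * inter)          # gate/up/down_proj scores
    llm_scores = 24 * per_layer                 # 357826560 across 24 decoder layers

    connector_scores = 1152 * 896 + 896 * 896   # the two masked connector linears

    print(llm_scores + connector_scores)        # 359661568 -> matches the log
    print(int(665298 * 0.1))                    # 66529 -> the 10% training subsample

The remaining ~924M of the 1283756736 total parameters are frozen: the Qwen2.5-0.5B backbone, the SigLIP so400m vision tower, and the connector's own weights.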
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-10 09:55:11,984 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-10 09:55:11,988 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+[NCCL topology setup elided: rank 0 enumerates Channel 00/24 .. Channel 23/24 : 0 1 2 3 4 5 6 7; each rank r logs Trees [0-23] (r+1)/-1/-1->r->(r-1) along the chain 0->1->2->3->4->5->6->7, and P2P Chunksize set to 524288]
+[NCCL channel wiring elided: on Channels 00/0-23/0 every rank connects to its ring neighbors (r[r] -> r+1[r+1], the wrap 7[7] -> 0[0], and the reverse links r[r] -> r-1[r-1]), all via P2P/CUMEM/read]
+ywang29-vrdb-test2-worker-0 NCCL INFO Connected all rings [all 8 ranks]
+ywang29-vrdb-test2-worker-0 NCCL INFO Connected all trees [all 8 ranks]
+ywang29-vrdb-test2-worker-0 NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 [all 8 ranks]
+ywang29-vrdb-test2-worker-0 NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer [all 8 ranks]
+ywang29-vrdb-test2-worker-0:751347:758006 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:751347:758006 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:751347:758006 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:751348:758001 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:751348:758001 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:751348:758001 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:751350:758007 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:751349:758005 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:751350:758007 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:751349:758005 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:751350:758007 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:751349:758005 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test2-worker-0:751347:758006 [3] NCCL INFO ncclCommInitRank comm 0x7f416c06aee0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x3c9cfb2400c57e0b - Init COMPLETE
+ywang29-vrdb-test2-worker-0:751346:758002 [2] NCCL INFO ncclCommInitRank comm 0x7f343806a9c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x3c9cfb2400c57e0b - Init COMPLETE
+ywang29-vrdb-test2-worker-0:751351:758003 [7] NCCL INFO ncclCommInitRank comm 0x7f146c06a8f0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x3c9cfb2400c57e0b - Init COMPLETE
+ywang29-vrdb-test2-worker-0:751350:758007 [6] NCCL INFO ncclCommInitRank comm 0x7f537c06abe0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x3c9cfb2400c57e0b - Init COMPLETE
+ywang29-vrdb-test2-worker-0:751349:758005 [5] NCCL INFO ncclCommInitRank comm 0x7fd0c006ad20 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x3c9cfb2400c57e0b - Init COMPLETE
+ywang29-vrdb-test2-worker-0:751345:758004 [1] NCCL INFO ncclCommInitRank comm 0x7f3f5006abb0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x3c9cfb2400c57e0b - Init COMPLETE
+ywang29-vrdb-test2-worker-0:751348:758001 [4] NCCL INFO ncclCommInitRank comm 0x7f59c806b570 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x3c9cfb2400c57e0b - Init COMPLETE
+ywang29-vrdb-test2-worker-0:751344:758000 [0] NCCL INFO ncclCommInitRank comm 0x7fe47806b0b0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x3c9cfb2400c57e0b - Init COMPLETE
+ 0%| | 1/520 [00:14<2:02:21, 14.15s/it] {'loss': 7.441, 'grad_norm': 0.4522466808183246, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:09:19, 8.03s/it] {'loss': 6.6999, 'grad_norm': 0.46828106716411305, 'learning_rate': 0.025, 'epoch': 0.0}
+ 1%| | 3/520 [00:21<52:01, 6.04s/it] {'loss': 5.7919, 'grad_norm': 0.2589037347781188, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%| | 4/520 [00:25<43:57, 5.11s/it] {'loss': 3.9785, 'grad_norm': 0.15201167182334013, 'learning_rate': 0.05, 'epoch': 0.01}
+ 1%| | 5/520 [00:28<39:27, 4.60s/it] {'loss': 5.9075, 'grad_norm': 1.3665166956950354, 'learning_rate': 0.0625, 'epoch': 0.01}
+ 1%| | 6/520 [00:32<36:56, 4.31s/it] {'loss': 4.9113, 'grad_norm': 0.16929945962265247, 'learning_rate': 0.07500000000000001, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:36<34:59, 4.09s/it] {'loss': 3.6174, 'grad_norm': 0.09579305123017046, 'learning_rate': 0.08750000000000001, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:40<35:26, 4.15s/it] {'loss': 3.0302, 'grad_norm': 0.07071726927951139, 'learning_rate': 0.1, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:44<35:34, 4.18s/it] {'loss': 2.7211, 'grad_norm': 0.05067096063212823, 'learning_rate': 0.1125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:48<34:12, 4.03s/it] {'loss': 2.2843, 'grad_norm': 0.03535020785458967, 'learning_rate': 0.125, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:52<33:39, 3.97s/it] {'loss': 2.233, 'grad_norm': 0.026841419212333666, 'learning_rate': 0.1375, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:56<32:51, 3.88s/it] {'loss': 2.5531, 'grad_norm': 0.03237288120857902, 'learning_rate': 0.15000000000000002, 'epoch': 0.02}
+[2025-10-10 09:56:17,990] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [01:00<33:59, 4.02s/it] {'loss': 2.0667, 'grad_norm': 0.029659888290582855, 'learning_rate': 0.1625, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:04<33:03, 3.92s/it] {'loss': 2.0736, 'grad_norm': 0.02729200177810479, 'learning_rate': 0.17500000000000002, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:07<32:26, 3.85s/it] {'loss': 2.2732, 'grad_norm': 0.029965355045884788, 'learning_rate': 0.1875, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:11<31:52, 3.79s/it] {'loss': 2.3687, 'grad_norm': 0.06118945345558431, 'learning_rate': 0.2, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:15<31:40, 3.78s/it] {'loss': 2.1717, 'grad_norm': 0.05725512438454783, 'learning_rate': 0.1999980572931538, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:19<31:46, 3.80s/it] {'loss': 1.8766, 'grad_norm': 0.03127994628027224, 'learning_rate': 0.19999222924809748, 'epoch': 0.03}
+ 4%|▎ | 19/520 [01:22<31:53, 3.82s/it] {'loss': 2.298, 'grad_norm': 0.02744067042662699, 'learning_rate': 0.19998251609127465, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:26<31:43, 3.81s/it] {'loss': 1.8307, 'grad_norm': 0.016955196946386634, 'learning_rate': 0.19996891820008164, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:30<31:30, 3.79s/it] {'loss': 2.3246, 'grad_norm': 0.030881285250568994, 'learning_rate': 0.19995143610285276, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:34<31:08, 3.75s/it] {'loss': 2.0233, 'grad_norm': 0.018951415422775647, 'learning_rate': 0.19993007047883987, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:37<30:51, 3.72s/it] {'loss': 1.8509, 'grad_norm': 0.010663613099582282, 'learning_rate': 0.1999048221581858, 'epoch': 0.04}
+ 5%|▍ | 24/520 [01:41<30:50, 3.73s/it] {'loss': 2.0698, 'grad_norm': 0.018475858691224086, 'learning_rate': 0.19987569212189224, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:45<30:41, 3.72s/it] {'loss': 1.8932, 'grad_norm': 0.014303847224092359, 'learning_rate': 0.19984268150178167, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:48<30:39, 3.72s/it] {'loss': 1.8637, 'grad_norm': 0.01536838259716798, 'learning_rate': 0.1998057915804532, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:52<30:59, 3.77s/it] {'loss': 1.6929, 'grad_norm': 0.009996821863517085, 'learning_rate': 0.1997650237912329, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:56<31:06, 3.79s/it] {'loss': 1.6868, 'grad_norm': 0.010858011093093858, 'learning_rate': 0.199720379718118, 'epoch': 0.05}
+ 6%|▌ | 29/520 [02:00<31:17, 3.82s/it] {'loss': 1.6882, 'grad_norm': 0.011101823739088145, 'learning_rate': 0.19967186109571552, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:04<31:22, 3.84s/it] {'loss': 2.1298, 'grad_norm': 0.014595721524597613, 'learning_rate': 0.19961946980917455, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:08<31:25, 3.86s/it] {'loss': 1.6778, 'grad_norm': 0.011102886987802802, 'learning_rate': 0.1995632078941134, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:12<31:24, 3.86s/it] {'loss': 2.2728, 'grad_norm': 0.025114471008946523, 'learning_rate': 0.19950307753654017, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:16<31:19, 3.86s/it] {'loss': 1.6723, 'grad_norm': 0.011218052876946223, 'learning_rate': 0.19943908107276798, 'epoch': 0.06}
+ 7%|▋ | 34/520 [02:19<31:17, 3.86s/it] {'loss': 1.6406, 'grad_norm': 0.00752767781999279, 'learning_rate': 0.19937122098932428, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:23<31:11, 3.86s/it] {'loss': 1.6668, 'grad_norm': 0.013566816312982706, 'learning_rate': 0.19929949992285395, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:27<30:55, 3.83s/it] {'loss': 1.7723, 'grad_norm': 0.009265792715189906, 'learning_rate': 0.19922392066001723, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:31<30:51, 3.83s/it] {'loss': 2.0885, 'grad_norm': 0.03232057908059816, 'learning_rate': 0.19914448613738106, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:35<30:27, 3.79s/it] {'loss': 1.8479, 'grad_norm': 0.009516374307608804, 'learning_rate': 0.1990611994413053, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:38<30:12, 3.77s/it] {'loss': 1.6821, 'grad_norm': 0.011909349555672154, 'learning_rate': 0.19897406380782262, 'epoch': 0.07}
+ 8%|▊ | 40/520 [02:42<29:52, 3.73s/it] {'loss': 1.6719, 'grad_norm': 0.007635441709977827, 'learning_rate': 0.19888308262251286, 'epoch': 0.08}
+ 8%|▊ | 41/520 [02:46<29:43, 3.72s/it] {'loss': 1.6661, 'grad_norm': 0.00869694274243427, 'learning_rate': 0.19878825942037148, 'epoch': 0.08}
+ 8%|▊ | 42/520 [02:49<29:29, 3.70s/it] {'loss': 1.7061, 'grad_norm': 0.010782855146219928, 'learning_rate': 0.19868959788567211, 'epoch': 0.08}
+ 8%|▊ | 43/520 [02:53<29:24, 3.70s/it] {'loss': 1.9223, 'grad_norm': 0.020614798757402946, 'learning_rate': 0.1985871018518236, 'epoch': 0.08}
+ 8%|▊ | 44/520 [02:57<29:29, 3.72s/it] {'loss': 2.0541, 'grad_norm': 0.014811915379892276, 'learning_rate': 0.19848077530122082, 'epoch': 0.08}
+ 9%|▊ | 45/520 [03:00<29:32, 3.73s/it] {'loss': 1.667, 'grad_norm': 0.010967786080984752, 'learning_rate': 0.19837062236509015, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:04<29:32, 3.74s/it] {'loss': 2.0911, 'grad_norm': 0.011634456863282414, 'learning_rate': 0.19825664732332884, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:08<29:24, 3.73s/it] {'loss': 1.6702, 'grad_norm': 0.014688143480638585, 'learning_rate': 0.19813885460433878, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:12<29:04, 3.70s/it] {'loss': 1.6412, 'grad_norm': 0.007761246878224468, 'learning_rate': 0.19801724878485438, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:15<28:51, 3.68s/it] {'loss': 1.6664, 'grad_norm': 0.01251324905131802, 'learning_rate': 0.19789183458976486, 'epoch': 0.09}
+ 10%|▉ | 50/520 [03:19<28:55, 3.69s/it] {'loss': 1.6359, 'grad_norm': 0.007514159090439979, 'learning_rate': 0.19776261689193048, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:23<28:56, 3.70s/it] {'loss': 1.5507, 'grad_norm': 0.010521202105571146, 'learning_rate': 0.19762960071199334, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:26<28:44, 3.69s/it] {'loss': 1.7127, 'grad_norm': 0.015531932688172117, 'learning_rate': 0.19749279121818236, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:30<28:36, 3.68s/it] {'loss': 1.6958, 'grad_norm': 0.00878221463165698, 'learning_rate': 0.19735219372611235, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:34<28:45, 3.70s/it] {'loss': 1.5703, 'grad_norm': 0.013494720925220301, 'learning_rate': 0.19720781369857746, 'epoch': 0.1}
+ 11%|█ | 55/520 [03:37<28:45, 3.71s/it] {'loss': 1.5624, 'grad_norm': 0.007995168879568205, 'learning_rate': 0.1970596567453391, 'epoch': 0.11}
+ 11%|█ | 56/520 [03:41<28:47, 3.72s/it] {'loss': 1.7317, 'grad_norm': 0.011677218849521179, 'learning_rate': 0.1969077286229078, 'epoch': 0.11}
+ 11%|█ | 57/520 [03:45<28:38, 3.71s/it] {'loss': 1.5631, 'grad_norm': 0.012175707856560187, 'learning_rate': 0.19675203523431964, 'epoch': 0.11}
+ 11%|█ | 58/520 [03:49<28:48, 3.74s/it] {'loss': 1.706, 'grad_norm': 0.006068213140784988, 'learning_rate': 0.19659258262890683, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [03:52<28:42, 3.74s/it] {'loss': 1.8559, 'grad_norm': 0.01705361973796989, 'learning_rate': 0.19642937700206278, 'epoch': 0.11}
+ 12%|█▏ | 60/520 [03:56<28:38, 3.74s/it] {'loss': 1.631, 'grad_norm': 0.011786915726942924, 'learning_rate': 0.19626242469500121, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [04:00<28:34, 3.74s/it] {'loss': 1.919, 'grad_norm': 0.0091811906749794, 'learning_rate': 0.19609173219450998, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:04<28:43, 3.76s/it] {'loss': 1.6179, 'grad_norm': 0.011095682155234233, 'learning_rate': 0.19591730613269878, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:08<28:45, 3.78s/it] {'loss': 1.584, 'grad_norm': 0.007966719380668995, 'learning_rate': 0.19573915328674182, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:11<28:50, 3.79s/it] {'loss': 1.6025, 'grad_norm': 0.007046795925350541, 'learning_rate': 0.1955572805786141, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:15<28:38, 3.78s/it] {'loss': 1.6213, 'grad_norm': 0.008317824725718506, 'learning_rate': 0.1953716950748227, 'epoch': 0.12}
+ 13%|█▎ | 66/520 [04:19<28:33, 3.77s/it] {'loss': 1.5799, 'grad_norm': 0.008690620315405434, 'learning_rate': 0.19518240398613226, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:23<28:16, 3.74s/it] {'loss': 1.4345, 'grad_norm': 0.00673175090985792, 'learning_rate': 0.1949894146672846, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:26<28:12, 3.74s/it] {'loss': 1.4947, 'grad_norm': 0.0071811651572547845, 'learning_rate': 0.1947927346167132, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:30<28:04, 3.73s/it] {'loss': 1.4891, 'grad_norm': 0.00869143717644172, 'learning_rate': 0.1945923714762516, 'epoch': 0.13}
+ 13%|█▎ | 70/520 [04:34<27:50, 3.71s/it] {'loss': 1.5462, 'grad_norm': 0.0067729060933072625, 'learning_rate': 0.19438833303083677, 'epoch': 0.13}
+ 14%|█▎ | 71/520 [04:37<27:36, 3.69s/it] {'loss': 1.4558, 'grad_norm': 0.0077552722850001135, 'learning_rate': 0.19418062720820636, 'epoch': 0.14}
+ 14%|█▍ | 72/520 [04:41<27:28, 3.68s/it] {'loss': 1.608, 'grad_norm': 0.006629037490358121, 'learning_rate': 0.19396926207859086, 'epoch': 0.14}
+ 14%|█▍ | 73/520 [04:45<27:23, 3.68s/it] {'loss': 1.4225, 'grad_norm': 0.006126054478870303, 'learning_rate': 0.19375424585439993, 'epoch': 0.14}
+ 14%|█▍ | 74/520 [04:48<27:34, 3.71s/it] {'loss': 1.5777, 'grad_norm': 0.007506407893897683, 'learning_rate': 0.1935355868899034, 'epoch': 0.14}
+ 14%|█▍ | 75/520 [04:52<27:31, 3.71s/it] {'loss': 1.432, 'grad_norm': 0.005868673965494597, 'learning_rate': 0.19331329368090666, 'epoch': 0.14}
+ 15%|█▍ | 76/520 [04:56<27:16, 3.69s/it] {'loss': 1.9276, 'grad_norm': 0.009536252533867808, 'learning_rate': 0.19308737486442043, 'epoch': 0.15}
+ 15%|█▍ | 77/520 [04:59<27:18, 3.70s/it] {'loss': 1.3631, 'grad_norm': 0.007115206874173993, 'learning_rate': 0.19285783921832536, 'epoch': 0.15}
+ 15%|█▌ | 78/520 [05:03<27:35, 3.75s/it] {'loss': 1.5098, 'grad_norm': 0.006033579154517731, 'learning_rate': 0.19262469566103088, 'epoch': 0.15}
+ 15%|█▌ | 79/520 [05:07<27:34, 3.75s/it] {'loss': 1.4918, 'grad_norm': 0.00646089721315262, 'learning_rate': 0.19238795325112867, 'epoch': 0.15}
+ 15%|█▌ | 80/520 [05:11<27:45, 3.78s/it] {'loss': 1.9002, 'grad_norm': 0.009234242132054331, 'learning_rate': 0.19214762118704076, 'epoch': 0.15}
+ 16%|█▌ | 81/520 [05:15<27:59, 3.83s/it] {'loss': 1.6528, 'grad_norm': 0.007848995059116291, 'learning_rate': 0.19190370880666208, 'epoch': 0.16}
+ 16%|█▌ | 82/520 [05:19<28:09, 3.86s/it] {'loss': 1.5499, 'grad_norm': 0.006271652883746201, 'learning_rate': 0.19165622558699763, 'epoch': 0.16}
+ 16%|█▌ | 83/520 [05:23<27:52, 3.83s/it] {'loss': 1.593, 'grad_norm': 0.00630136945364371, 'learning_rate': 0.19140518114379435, 'epoch': 0.16}
+ 16%|█▌ | 84/520 [05:26<27:29, 3.78s/it] {'loss': 1.5766, 'grad_norm': 0.0062966475776591434, 'learning_rate': 0.19115058523116735, 'epoch': 0.16}
+ 16%|█▋ | 85/520 [05:30<27:21, 3.77s/it] {'loss': 1.5863, 'grad_norm': 0.007343120213594399, 'learning_rate': 0.1908924477412211, 'epoch': 0.16}
+ 17%|█▋ | 86/520 [05:34<27:48, 3.85s/it] {'loss': 1.6447, 'grad_norm': 0.0069381487080655046, 'learning_rate': 0.19063077870366502, 'epoch': 0.17}
+ 17%|█▋ | 87/520 [05:38<28:19, 3.92s/it] {'loss': 1.8498, 'grad_norm': 0.009151518184202059, 'learning_rate': 0.1903655882854237, 'epoch': 0.17}
+ 17%|█▋ | 88/520 [05:42<28:38, 3.98s/it] {'loss': 1.9144, 'grad_norm': 0.012195727648208437, 'learning_rate': 0.19009688679024192, 'epoch': 0.17}
+ 17%|█▋ | 89/520 [05:46<28:51, 4.02s/it] {'loss': 1.5509, 'grad_norm': 0.006101591771689832, 'learning_rate': 0.18982468465828442, 'epoch': 0.17}
+ 17%|█▋ | 90/520 [05:50<28:43, 4.01s/it] {'loss': 1.4822, 'grad_norm': 0.006635545316361062, 'learning_rate': 0.1895489924657301, 'epoch': 0.17}
+ 18%|█▊ | 91/520 [05:54<28:25, 3.98s/it] {'loss': 1.5627, 'grad_norm': 0.005291721664921626, 'learning_rate': 0.18926982092436118, 'epoch': 0.17}
+ 18%|█▊ | 92/520 [05:58<28:10, 3.95s/it] {'loss': 1.4985, 'grad_norm': 0.006407891315058607, 'learning_rate': 0.18898718088114688, 'epoch': 0.18}
+ 18%|█▊ | 93/520 [06:02<28:03, 3.94s/it] {'loss': 1.5042, 'grad_norm': 0.006551319640398861, 'learning_rate': 0.18870108331782218, 'epoch': 0.18}
+ 18%|█▊ | 94/520 [06:06<27:55, 3.93s/it] {'loss': 1.6143, 'grad_norm': 0.005965637599693361, 'learning_rate': 0.18841153935046098, 'epoch': 0.18}
+ 18%|█▊ | 95/520 [06:10<27:53, 3.94s/it] {'loss': 1.4801, 'grad_norm': 0.006332146054262738, 'learning_rate': 0.18811856022904425, 'epoch': 0.18}
+ 18%|█▊ | 96/520 [06:14<27:48, 3.93s/it] {'loss': 1.4829, 'grad_norm': 0.005377725531169132, 'learning_rate': 0.18782215733702287, 'epoch': 0.18}
+ 19%|█▊ | 97/520 [06:18<27:49, 3.95s/it] {'loss': 1.4576, 'grad_norm': 0.006912583564878244, 'learning_rate': 0.18752234219087538, 'epoch': 0.19}
+ 19%|█▉ | 98/520 [06:22<27:41, 3.94s/it] {'loss': 1.4542, 'grad_norm': 0.004982957375520554, 'learning_rate': 0.18721912643966054, 'epoch': 0.19}
+ 19%|█▉ | 99/520 [06:26<27:33, 3.93s/it] {'loss': 1.4754, 'grad_norm': 0.005242442152000852, 'learning_rate': 0.18691252186456464, 'epoch': 0.19}
+ 19%|█▉ | 100/520 [06:30<27:35, 3.94s/it] {'loss': 1.6722, 'grad_norm': 0.008613832175592953, 'learning_rate': 0.1866025403784439, 'epoch': 0.19}
+ 19%|█▉ | 101/520 [06:34<27:29, 3.94s/it] {'loss': 1.4616, 'grad_norm': 0.006217545608168523, 'learning_rate': 0.18628919402536132, 'epoch': 0.19}
+ 20%|█▉ | 102/520 [06:37<27:22, 3.93s/it] {'loss': 1.4929, 'grad_norm': 0.0066637360624043525, 'learning_rate': 0.18597249498011903, 'epoch': 0.2}
+ 20%|█▉ | 103/520 [06:41<27:14, 3.92s/it] {'loss': 1.405, 'grad_norm': 0.004973426109876475, 'learning_rate': 0.18565245554778517, 'epoch': 0.2}
+ 20%|██ | 104/520 [06:45<27:05, 3.91s/it] {'loss': 1.4902, 'grad_norm': 0.006750292064412045, 'learning_rate': 0.18532908816321558, 'epoch': 0.2}
+ 20%|██ | 105/520 [06:49<27:01, 3.91s/it] {'loss': 1.482, 'grad_norm': 0.0050812589884182855, 'learning_rate': 0.18500240539057092, 'epoch': 0.2}
+ 20%|██ | 106/520 [06:53<26:57, 3.91s/it] {'loss': 1.6422, 'grad_norm': 0.009104454485689303, 'learning_rate': 0.18467241992282843, 'epoch': 0.2}
+ 21%|██ | 107/520 [06:57<26:51, 3.90s/it] {'loss': 1.6294, 'grad_norm': 0.006304829070981324, 'learning_rate': 0.18433914458128858, 'epoch': 0.21}
+ 21%|██ | 108/520 [07:01<26:50, 3.91s/it] {'loss': 1.4361, 'grad_norm': 0.006223099491316061, 'learning_rate': 0.18400259231507718, 'epoch': 0.21}
+ 21%|██ | 109/520 [07:05<26:45, 3.91s/it] {'loss': 1.6055, 'grad_norm': 0.006162866439780072, 'learning_rate': 0.18366277620064198, 'epoch': 0.21}
+ 21%|██ | 110/520 [07:09<26:43, 3.91s/it] {'loss': 1.6246, 'grad_norm': 0.006267335124678517, 'learning_rate': 0.1833197094412449, 'epoch': 0.21}
+ 21%|██▏ | 111/520 [07:13<26:36, 3.90s/it] {'loss': 1.6394, 'grad_norm': 0.005749612217861827, 'learning_rate': 0.18297340536644877, 'epoch': 0.21}
+ 22%|██▏ | 112/520 [07:16<26:28, 3.89s/it] {'loss': 1.512, 'grad_norm': 0.006055502776265165, 'learning_rate': 0.1826238774315995, 'epoch': 0.22}
+ 22%|██▏ | 113/520 [07:20<26:24, 3.89s/it] {'loss': 1.3706, 'grad_norm': 0.005210105647192459, 'learning_rate': 0.18227113921730334, 'epoch': 0.22}
+ 22%|██▏ | 114/520 [07:24<26:20, 3.89s/it] {'loss': 1.4871, 'grad_norm': 0.005833621760623801, 'learning_rate': 0.1819152044288992, 'epoch': 0.22}
+ 22%|██▏ | 115/520 [07:28<26:02, 3.86s/it] {'loss': 1.6115, 'grad_norm': 0.005425886909607745, 'learning_rate': 0.18155608689592603, 'epoch': 0.22}
+ 22%|██▏ | 116/520 [07:32<25:39, 3.81s/it] {'loss': 1.6069, 'grad_norm': 0.005210754480730656, 'learning_rate': 0.18119380057158568, 'epoch': 0.22}
+ 22%|██▎ | 117/520 [07:35<25:17, 3.77s/it] {'loss': 1.5901, 'grad_norm': 0.0055465410450133154, 'learning_rate': 0.18082835953220056, 'epoch': 0.23}
+ 23%|██▎ | 118/520 [07:39<25:13, 3.77s/it] {'loss': 1.4559, 'grad_norm': 0.0050142839825054325, 'learning_rate': 0.18045977797666685, 'epoch': 0.23}
+ 23%|██▎ | 119/520 [07:43<25:28, 3.81s/it] {'loss': 1.4061, 'grad_norm': 0.005840604673368683, 'learning_rate': 0.1800880702259028, 'epoch': 0.23}
+ 23%|██▎ | 120/520 [07:47<25:30, 3.83s/it] {'loss': 1.4426, 'grad_norm': 0.006406769649136632, 'learning_rate': 0.17971325072229227, 'epoch': 0.23}
+ 23%|██▎ | 121/520 [07:51<25:25, 3.82s/it] {'loss': 1.4938, 'grad_norm': 0.006612307195051424, 'learning_rate': 0.17933533402912352, 'epoch': 0.23}
+ 23%|██▎ | 122/520 [07:55<25:26, 3.84s/it] {'loss': 1.383, 'grad_norm': 0.005035375140682725, 'learning_rate': 0.17895433483002354, 'epoch': 0.23}
+ 24%|██▎ | 123/520 [07:58<25:25, 3.84s/it] {'loss': 1.6893, 'grad_norm': 0.008818274846333563, 'learning_rate': 0.17857026792838737, 'epoch': 0.24}
+ 24%|██▍ | 124/520 [08:02<25:23, 3.85s/it] {'loss': 1.4777, 'grad_norm': 0.005875930488210656, 'learning_rate': 0.178183148246803, 'epoch': 0.24}
+ 24%|██▍ | 125/520 [08:06<25:19, 3.85s/it] {'loss': 1.4588, 'grad_norm': 0.005422934753650742, 'learning_rate': 0.1777929908264715, 'epoch': 0.24}
+ 24%|██▍ | 126/520 [08:11<26:32, 4.04s/it] {'loss': 1.5761, 'grad_norm': 0.005719135391539116, 'learning_rate': 0.17739981082662276, 'epoch': 0.24}
+ 24%|██▍ | 127/520 [08:14<26:07, 3.99s/it] {'loss': 1.4297, 'grad_norm': 0.0061002390773645075, 'learning_rate': 0.1770036235239263, 'epoch': 0.24}
+ 25%|██▍ | 128/520 [08:18<25:51, 3.96s/it] {'loss': 1.493, 'grad_norm': 0.005787736699997964, 'learning_rate': 0.1766044443118978, 'epoch': 0.25}
+ 25%|██▍ | 129/520 [08:22<25:38, 3.93s/it] {'loss': 1.4023, 'grad_norm': 0.004577950853133838, 'learning_rate': 0.17620228870030108, 'epoch': 0.25}
+ 25%|██▌ | 130/520 [08:26<25:25, 3.91s/it] {'loss': 1.4718, 'grad_norm': 0.00464970607310251, 'learning_rate': 0.1757971723145453, 'epoch': 0.25}
+ 25%|██▌ | 131/520 [08:30<25:15, 3.90s/it] {'loss': 1.538, 'grad_norm': 0.005762406714756639, 'learning_rate': 0.175389110895078, 'epoch': 0.25}
+ 25%|██▌ | 132/520 [08:34<25:02, 3.87s/it] {'loss': 1.5196, 'grad_norm': 0.005469779588997773, 'learning_rate': 0.17497812029677343, 'epoch': 0.25}
+ 26%|██▌ | 133/520 [08:38<24:59, 3.87s/it] {'loss': 1.4226, 'grad_norm': 0.005433966943309059, 'learning_rate': 0.17456421648831655, 'epoch': 0.26}
+ 26%|██▌ | 134/520 [08:42<24:51, 3.86s/it] {'loss': 1.5002, 'grad_norm': 0.005106469268289579, 'learning_rate': 0.17414741555158267, 'epoch': 0.26}
+ 26%|██▌ | 135/520 [08:45<24:46, 3.86s/it] {'loss': 1.5798, 'grad_norm': 0.005432920905238568, 'learning_rate': 0.1737277336810124, 'epoch': 0.26}
+ 26%|██▌ | 136/520 [08:49<24:43, 3.86s/it] {'loss': 1.5002, 'grad_norm': 0.00520652396665753, 'learning_rate': 0.17330518718298263, 'epoch': 0.26}
+ 26%|██▋ | 137/520 [08:53<24:35, 3.85s/it] {'loss': 1.4155, 'grad_norm': 0.006182383196565826, 'learning_rate': 0.17287979247517285, 'epoch': 0.26}
+ 27%|██▋ | 138/520 [08:57<24:31, 3.85s/it] {'loss': 1.4218, 'grad_norm': 0.005218527613281468, 'learning_rate': 0.17245156608592727, 'epoch': 0.27}
+ 27%|██▋ | 139/520 [09:01<24:26, 3.85s/it] {'loss': 1.4329, 'grad_norm': 0.005387648527188836, 'learning_rate': 0.17202052465361267, 'epoch': 0.27}
+ 27%|██▋ | 140/520 [09:05<24:26, 3.86s/it] {'loss': 1.5842, 'grad_norm': 0.006541514297584473, 'learning_rate': 0.17158668492597184, 'epoch': 0.27}
+ 27%|██▋ | 141/520 [09:08<24:22, 3.86s/it] {'loss': 1.5451, 'grad_norm': 0.005945881689491424, 'learning_rate': 0.17115006375947303, 'epoch': 0.27}
+ 27%|██▋ | 142/520 [09:12<24:16, 3.85s/it] {'loss': 1.634, 'grad_norm': 0.005659204488189142, 'learning_rate': 0.17071067811865476, 'epoch': 0.27}
+ 28%|██▊ | 143/520 [09:16<24:12, 3.85s/it] {'loss': 1.4678, 'grad_norm': 0.007219973566297393, 'learning_rate': 0.17026854507546693, 'epoch': 0.28}
+ 28%|██▊ | 144/520 [09:20<24:11, 3.86s/it] {'loss': 1.4145, 'grad_norm': 0.005862160296314743, 'learning_rate': 0.1698236818086073, 'epoch': 0.28}
+ 28%|██▊ | 145/520 [09:24<24:07, 3.86s/it] {'loss': 1.3358, 'grad_norm': 0.0048979146027395715, 'learning_rate': 0.16937610560285418, 'epoch': 0.28}
+ 28%|██▊ | 146/520 [09:28<24:05, 3.86s/it] {'loss': 1.658, 'grad_norm': 0.006869281245283992, 'learning_rate': 0.1689258338483947, 'epoch': 0.28}
+ 28%|██▊ | 147/520 [09:32<23:59, 3.86s/it] {'loss': 1.3846, 'grad_norm': 0.0050200233041555985, 'learning_rate': 0.16847288404014937, 'epoch': 0.28}
+ 28%|██▊ | 148/520 [09:35<23:52, 3.85s/it] {'loss': 1.4233, 'grad_norm': 0.005485388605344647, 'learning_rate': 0.16801727377709194, 'epoch': 0.28}
+ 29%|██▊ | 149/520 [09:39<23:50, 3.86s/it] {'loss': 1.3753, 'grad_norm': 0.006142034416124654, 'learning_rate': 0.16755902076156604, 'epoch': 0.29}
+ 29%|██▉ | 150/520 [09:43<23:45, 3.85s/it] {'loss': 1.6043, 'grad_norm': 0.0054946195813065325, 'learning_rate': 0.16709814279859703, 'epoch': 0.29}
+ 29%|██▉ | 151/520 [09:47<23:41, 3.85s/it] {'loss': 1.4086, 'grad_norm': 0.005741402425763124, 'learning_rate': 0.1666346577952004, 'epoch': 0.29}
+ 29%|██▉ | 152/520 [09:51<23:42, 3.87s/it] {'loss': 1.3723, 'grad_norm': 0.006445266014796771, 'learning_rate': 0.16616858375968596, 'epoch': 0.29}
+ 29%|██▉ | 153/520 [09:55<23:35, 3.86s/it] {'loss': 1.4123, 'grad_norm': 0.005072916025975721, 'learning_rate': 0.16569993880095807, 'epoch': 0.29}
+ 30%|██▉ | 154/520 [09:59<23:29, 3.85s/it] {'loss': 1.4967, 'grad_norm': 0.006084694666192399, 'learning_rate': 0.16522874112781213, 'epoch': 0.3}
+ 30%|██▉ | 155/520 [10:02<23:23, 3.84s/it] {'loss': 1.4039, 'grad_norm': 0.006060252286417368, 'learning_rate': 0.16475500904822704, 'epoch': 0.3}
+ 30%|███ | 156/520 [10:06<23:19, 3.84s/it] {'loss': 1.4314, 'grad_norm': 0.0055127630829486735, 'learning_rate': 0.16427876096865393, 'epoch': 0.3}
+ 30%|███ | 157/520 [10:10<23:16, 3.85s/it] {'loss': 1.6645, 'grad_norm': 0.006362836040023043, 'learning_rate': 0.16380001539330089, 'epoch': 0.3}
+ 30%|███ | 158/520 [10:14<23:12, 3.85s/it] {'loss': 1.418, 'grad_norm': 0.007187508951861113, 'learning_rate': 0.163318790923414, 'epoch': 0.3}
+ 31%|███ | 159/520 [10:18<23:10, 3.85s/it] {'loss': 1.4373, 'grad_norm': 0.004914182634832631, 'learning_rate': 0.16283510625655473, 'epoch': 0.31}
+ 31%|███ | 160/520 [10:22<23:04, 3.85s/it] {'loss': 1.4808, 'grad_norm': 0.006856480159621379, 'learning_rate': 0.16234898018587338, 'epoch': 0.31}
+ 31%|███ | 161/520 [10:26<23:00, 3.85s/it] {'loss': 1.4604, 'grad_norm': 0.005571541904952994, 'learning_rate': 0.16186043159937882, 'epoch': 0.31}
+ 31%|███ | 162/520 [10:29<22:56, 3.85s/it] {'loss': 1.5919, 'grad_norm': 0.0058555572103603, 'learning_rate': 0.16136947947920477, 'epoch': 0.31}
+ 31%|███▏ | 163/520 [10:33<22:52, 3.84s/it] {'loss': 1.3214, 'grad_norm': 0.007670601139308934, 'learning_rate': 0.16087614290087207, 'epoch': 0.31}
+ 32%|███▏ | 164/520 [10:37<22:54, 3.86s/it] {'loss': 1.2813, 'grad_norm': 0.004782911977588035, 'learning_rate': 0.16038044103254775, 'epoch': 0.32}
+ 32%|███▏ | 165/520 [10:41<22:53, 3.87s/it] {'loss': 1.4297, 'grad_norm': 0.004690448847990239, 'learning_rate': 0.15988239313430005, 'epoch': 0.32}
+ 32%|███▏ | 166/520 [10:45<22:48, 3.87s/it] {'loss': 1.4421, 'grad_norm': 0.0070247150936984986, 'learning_rate': 0.15938201855735015, 'epoch': 0.32}
+ 32%|███▏ | 167/520 [10:49<22:44, 3.87s/it] {'loss': 1.4267, 'grad_norm': 0.00603926719921287, 'learning_rate': 0.15887933674332047, 'epoch': 0.32}
+ 32%|███▏ | 168/520 [10:53<22:42, 3.87s/it] {'loss': 1.3486, 'grad_norm': 0.005983679034878625, 'learning_rate': 0.158374367223479, 'epoch': 0.32}
+ 32%|███▎ | 169/520 [10:57<22:43, 3.89s/it] {'loss': 1.437, 'grad_norm': 0.0059848645892271145, 'learning_rate': 0.1578671296179806, 'epoch': 0.33}
+ 33%|███▎ | 170/520 [11:00<22:43, 3.89s/it] {'loss': 1.4891, 'grad_norm': 0.005364117236331797, 'learning_rate': 0.15735764363510463, 'epoch': 0.33}
+ 33%|███▎ | 171/520 [11:04<22:40, 3.90s/it] {'loss': 1.365, 'grad_norm': 0.005295755962758616, 'learning_rate': 0.15684592907048925, 'epoch': 0.33}
+ 33%|███▎ | 172/520 [11:08<22:37, 3.90s/it] {'loss': 1.4305, 'grad_norm': 0.006231038900457953, 'learning_rate': 0.1563320058063622, 'epoch': 0.33}
+ 33%|███▎ | 173/520 [11:12<22:34, 3.90s/it] {'loss': 1.3611, 'grad_norm': 0.004578225551281908, 'learning_rate': 0.15581589381076844, 'epoch': 0.33}
+ 33%|███▎ | 174/520 [11:16<22:30, 3.90s/it] {'loss': 1.4363, 'grad_norm': 0.0055173387767857626, 'learning_rate': 0.15529761313679394, 'epoch': 0.33}
+ 34%|███▎ | 175/520 [11:20<22:30, 3.91s/it] {'loss': 1.3405, 'grad_norm': 0.005497199434274044, 'learning_rate': 0.15477718392178716, 'epoch': 0.34}
+ 34%|███▍ | 176/520 [11:24<22:22, 3.90s/it] {'loss': 1.5921, 'grad_norm': 0.005547869933003853, 'learning_rate': 0.15425462638657594, 'epoch': 0.34}
+ 34%|███▍ | 177/520 [11:28<22:23, 3.92s/it] {'loss': 1.4419, 'grad_norm': 0.0046999459427059225, 'learning_rate': 0.1537299608346824, 'epoch': 0.34}
+ 34%|███▍ | 178/520 [11:32<22:14, 3.90s/it] {'loss': 1.4145, 'grad_norm': 0.005375031904905031, 'learning_rate': 0.15320320765153367, 'epoch': 0.34}
+ 34%|███▍ | 179/520 [11:35<21:54, 3.86s/it] {'loss': 1.5047, 'grad_norm': 0.004578473389492211, 'learning_rate': 0.15267438730367008, 'epoch': 0.34}
+ 35%|███▍ | 180/520 [11:39<21:37, 3.82s/it] {'loss': 1.4074, 'grad_norm': 0.005113459386403676, 'learning_rate': 0.1521435203379498, 'epoch': 0.35}
+ 35%|███▍ | 181/520 [11:43<21:24, 3.79s/it] {'loss': 1.3791, 'grad_norm': 0.0042241400943570945, 'learning_rate': 0.15161062738075068, 'epoch': 0.35}
+ 35%|███▌ | 182/520 [11:47<21:10, 3.76s/it] {'loss': 1.3955, 'grad_norm': 0.004616726425245251, 'learning_rate': 0.1510757291371686, 'epoch': 0.35}
+ 35%|███▌ | 183/520 [11:50<21:02, 3.75s/it] {'loss': 1.4284, 'grad_norm': 0.00476927835674738, 'learning_rate': 0.1505388463902131, 'epoch': 0.35}
+ 35%|███▌ | 184/520 [11:54<20:53, 3.73s/it] {'loss': 1.3198, 'grad_norm': 0.006526336172287247, 'learning_rate': 0.15000000000000002, 'epoch': 0.35}
+ 36%|███▌ | 185/520 [11:58<20:49, 3.73s/it] {'loss': 1.5206, 'grad_norm': 0.005040305286700489, 'learning_rate': 0.14945921090294076, 'epoch': 0.36}
+ 36%|███▌ | 186/520 [12:01<20:37, 3.70s/it] {'loss': 1.3499, 'grad_norm': 0.005512275705074487, 'learning_rate': 0.14891650011092894, 'epoch': 0.36}
+ 36%|███▌ | 187/520 [12:05<20:30, 3.70s/it] {'loss': 1.3685, 'grad_norm': 0.007167839366440906, 'learning_rate': 0.14837188871052398, 'epoch': 0.36}
+ 36%|███▌ | 188/520 [12:09<20:22, 3.68s/it] {'loss': 1.4467, 'grad_norm': 0.005354658725019577, 'learning_rate': 0.14782539786213184, 'epoch': 0.36}
+ 36%|███▋ | 189/520 [12:12<20:16, 3.67s/it] {'loss': 1.4624, 'grad_norm': 0.004677956909732322, 'learning_rate': 0.1472770487991827, 'epoch': 0.36}
+ 37%|███▋ | 190/520 [12:16<20:09, 3.66s/it] {'loss': 1.3613, 'grad_norm': 0.005879923870502793, 'learning_rate': 0.1467268628273062, 'epoch': 0.37}
+ 37%|███▋ | 191/520 [12:20<20:07, 3.67s/it] {'loss': 1.3184, 'grad_norm': 0.0046575842023194374, 'learning_rate': 0.1461748613235034, 'epoch': 0.37}
+ 37%|███▋ | 192/520 [12:23<20:02, 3.67s/it] {'loss': 1.42, 'grad_norm': 0.0050441646725143405, 'learning_rate': 0.1456210657353163, 'epoch': 0.37}
+ 37%|███▋ | 193/520 [12:27<19:57, 3.66s/it] {'loss': 1.5317, 'grad_norm': 0.006015257860717458, 'learning_rate': 0.14506549757999454, 'epoch': 0.37}
+ 37%|███▋ | 194/520 [12:31<19:53, 3.66s/it] {'loss': 1.3715, 'grad_norm': 0.005138350900094149, 'learning_rate': 0.14450817844365924, 'epoch': 0.37}
+ 38%|███▊ | 195/520 [12:34<19:46, 3.65s/it] {'loss': 1.4392, 'grad_norm': 0.004956533331798028, 'learning_rate': 0.1439491299804645, 'epoch': 0.38}
+ 38%|███▊ | 196/520 [12:38<19:44, 3.66s/it] {'loss': 1.4019, 'grad_norm': 0.006615131444687799, 'learning_rate': 0.14338837391175582, 'epoch': 0.38}
+ 38%|███▊ | 197/520 [12:42<19:41, 3.66s/it] {'loss': 1.3602, 'grad_norm': 0.004854598726962294, 'learning_rate': 0.14282593202522628, 'epoch': 0.38}
+ 38%|███▊ | 198/520 [12:45<19:34, 3.65s/it] {'loss': 1.4395, 'grad_norm': 0.005597564979679155, 'learning_rate': 0.14226182617406996, 'epoch': 0.38}
+ 38%|███▊ | 199/520 [12:49<19:31, 3.65s/it] {'loss': 1.3444, 'grad_norm': 0.004720087320699509, 'learning_rate': 0.14169607827613281, 'epoch': 0.38}
+ 38%|███▊ | 200/520 [12:53<19:26, 3.65s/it] {'loss': 1.4483, 'grad_norm': 0.005918526913824339, 'learning_rate': 0.14112871031306118, 'epoch': 0.38}
+ 39%|███▊ | 201/520 [12:56<19:22, 3.65s/it] {'loss': 1.4554, 'grad_norm': 0.005301771261001529, 'learning_rate': 0.1405597443294475, 'epoch': 0.39}
+ 39%|███▉ | 202/520 [13:00<19:20, 3.65s/it] {'loss': 1.3385, 'grad_norm': 0.004898699744403816, 'learning_rate': 0.13998920243197407, 'epoch': 0.39}
+ 39%|███▉ | 203/520 [13:04<19:19, 3.66s/it] {'loss': 1.3933, 'grad_norm': 0.004720807818013876, 'learning_rate': 0.13941710678855396, 'epoch': 0.39}
+ 39%|███▉ | 204/520 [13:07<19:15, 3.66s/it] {'loss': 1.4362, 'grad_norm': 0.00566731642891857, 'learning_rate': 0.13884347962746948, 'epoch': 0.39}
+ 39%|███▉ | 205/520 [13:11<19:17, 3.67s/it] {'loss': 1.4705, 'grad_norm': 0.005487070779894392, 'learning_rate': 0.138268343236509, 'epoch': 0.39}
+ 40%|███▉ | 206/520 [13:15<19:14, 3.68s/it] {'loss': 1.483, 'grad_norm': 0.005026960103311874, 'learning_rate': 0.13769171996210053, 'epoch': 0.4}
+ 40%|███▉ | 207/520 [13:18<19:11, 3.68s/it] {'loss': 1.4376, 'grad_norm': 0.004428344215195919, 'learning_rate': 0.1371136322084438, 'epoch': 0.4}
+ 40%|████ | 208/520 [13:22<19:05, 3.67s/it] {'loss': 1.4296, 'grad_norm': 0.005153854519987145, 'learning_rate': 0.13653410243663952, 'epoch': 0.4}
+ 40%|████ | 209/520 [13:26<19:01, 3.67s/it] {'loss': 1.3623, 'grad_norm': 0.00489548225340607, 'learning_rate': 0.13595315316381676, 'epoch': 0.4}
+ 40%|████ | 210/520 [13:29<18:56, 3.67s/it] {'loss': 1.4355, 'grad_norm': 0.005225065780860117, 'learning_rate': 0.13537080696225814, 'epoch': 0.4}
+ 41%|████ | 211/520 [13:33<18:56, 3.68s/it] {'loss': 1.4443, 'grad_norm': 0.004464416405655529, 'learning_rate': 0.13478708645852272, 'epoch': 0.41}
+ 41%|████ | 212/520 [13:37<18:50, 3.67s/it] {'loss': 1.4078, 'grad_norm': 0.004546013996111945, 'learning_rate': 0.1342020143325669, 'epoch': 0.41}
+ 41%|████ | 213/520 [13:40<18:51, 3.69s/it] {'loss': 1.3732, 'grad_norm': 0.005474184458154775, 'learning_rate': 0.13361561331686309, 'epoch': 0.41}
+ 41%|████ | 214/520 [13:44<18:44, 3.68s/it] {'loss': 1.3621, 'grad_norm': 0.005192126809532077, 'learning_rate': 0.13302790619551672, 'epoch': 0.41}
+ 41%|████▏ | 215/520 [13:48<18:41, 3.68s/it] {'loss': 1.373, 'grad_norm': 0.005136827462556873, 'learning_rate': 0.1324389158033807, 'epoch': 0.41}
+ 42%|████▏ | 216/520 [13:51<18:40, 3.69s/it] {'loss': 1.2743, 'grad_norm': 0.004729206512209152, 'learning_rate': 0.13184866502516845, 'epoch': 0.42}
+ 42%|████▏ | 217/520 [13:55<18:37, 3.69s/it] {'loss': 1.4039, 'grad_norm': 0.005032376141295673, 'learning_rate': 0.13125717679456447, 'epoch': 0.42}
+ 42%|████▏ | 218/520 [13:59<18:33, 3.69s/it] {'loss': 1.4031, 'grad_norm': 0.00510163627064917, 'learning_rate': 0.13066447409333345, 'epoch': 0.42}
+ 42%|████▏ | 219/520 [14:02<18:26, 3.68s/it] {'loss': 1.3635, 'grad_norm': 0.004276954069770759, 'learning_rate': 0.1300705799504273, 'epoch': 0.42}
+ 42%|████▏ | 220/520 [14:06<18:22, 3.68s/it] {'loss': 1.4317, 'grad_norm': 0.0056067409972889145, 'learning_rate': 0.12947551744109043, 'epoch': 0.42}
+ 42%|████▎ | 221/520 [14:10<18:19, 3.68s/it] {'loss': 1.4102, 'grad_norm': 0.004761995856490033, 'learning_rate': 0.128879309685963, 'epoch': 0.42}
+ 43%|████▎ | 222/520 [14:13<18:19, 3.69s/it] {'loss': 1.3127, 'grad_norm': 0.0047501424298131804, 'learning_rate': 0.12828197985018275, 'epoch': 0.43}
+ 43%|████▎ | 223/520 [14:17<18:13, 3.68s/it] {'loss': 1.2936, 'grad_norm': 0.00446595574924699, 'learning_rate': 0.12768355114248495, 'epoch': 0.43}
+ 43%|████▎ | 224/520 [14:21<18:09, 3.68s/it] {'loss': 1.6388, 'grad_norm': 0.00819925014492022, 'learning_rate': 0.12708404681430052, 'epoch': 0.43}
+ 43%|████▎ | 225/520 [14:24<18:06, 3.68s/it] {'loss': 1.3257, 'grad_norm': 0.005498911892438459, 'learning_rate': 0.1264834901588527, 'epoch': 0.43}
+ 43%|████▎ | 226/520 [14:28<18:09, 3.71s/it] {'loss': 1.4275, 'grad_norm': 0.004511688041428275, 'learning_rate': 0.12588190451025208, 'epoch': 0.43}
+ 44%|████▎ | 227/520 [14:32<18:16, 3.74s/it] {'loss': 1.4125, 'grad_norm': 0.004741710688780891, 'learning_rate': 0.12527931324258976, 'epoch': 0.44}
+ 44%|████▍ | 228/520 [14:36<18:21, 3.77s/it] {'loss': 1.6098, 'grad_norm': 0.006412186229582023, 'learning_rate': 0.12467573976902935, 'epoch': 0.44}
+ 44%|████▍ | 229/520 [14:40<18:24, 3.80s/it] {'loss': 1.3874, 'grad_norm': 0.004190625903708462, 'learning_rate': 0.12407120754089732, 'epoch': 0.44}
+ 44%|████▍ | 230/520 [14:44<18:26, 3.82s/it] {'loss': 1.2574, 'grad_norm': 0.004637704418564142, 'learning_rate': 0.12346574004677154, 'epoch': 0.44}
+ 44%|████▍ | 231/520 [14:47<18:26, 3.83s/it] {'loss': 1.3147, 'grad_norm': 0.004499602683250771, 'learning_rate': 0.12285936081156897, 'epoch': 0.44}
+ 45%|████▍ | 232/520 [14:51<18:24, 3.84s/it] {'loss': 1.601, 'grad_norm': 0.005337771442405905, 'learning_rate': 0.12225209339563144, 'epoch': 0.45}
+ 45%|████▍ | 233/520 [14:55<18:22, 3.84s/it] {'loss': 1.4737, 'grad_norm': 0.00602207569589672, 'learning_rate': 0.12164396139381028, 'epoch': 0.45}
+ 45%|████▌ | 234/520 [14:59<18:23, 3.86s/it] {'loss': 1.2729, 'grad_norm': 0.0047081577773637265, 'learning_rate': 0.12103498843454959, 'epoch': 0.45}
+ 45%|████▌ | 235/520 [15:03<18:16, 3.85s/it] {'loss': 1.3262, 'grad_norm': 0.0053064026225424525, 'learning_rate': 0.12042519817896805, 'epoch': 0.45}
+ 45%|████▌ | 236/520 [15:07<18:01, 3.81s/it] {'loss': 1.4518, 'grad_norm': 0.00432299989389269, 'learning_rate': 0.11981461431993977, 'epoch': 0.45}
+ 46%|████▌ | 237/520 [15:10<17:51, 3.79s/it] {'loss': 1.4131, 'grad_norm': 0.004943579430623929, 'learning_rate': 0.11920326058117364, 'epoch': 0.46}
+ 46%|████▌ | 238/520 [15:14<17:42, 3.77s/it] {'loss': 1.3491, 'grad_norm': 0.004666263675275581, 'learning_rate': 0.11859116071629149, 'epoch': 0.46}
+ 46%|████▌ | 239/520 [15:18<17:36, 3.76s/it] {'loss': 1.4484, 'grad_norm': 0.005250020284256481, 'learning_rate': 0.11797833850790528, 'epoch': 0.46}
+ 46%|████▌ | 240/520 [15:22<17:30, 3.75s/it] {'loss': 1.2076, 'grad_norm': 0.004874871276833494, 'learning_rate': 0.11736481776669305, 'epoch': 0.46}
+ 46%|████▋ | 241/520 [15:25<17:25, 3.75s/it] {'loss': 1.3038, 'grad_norm': 0.00458166561428954, 'learning_rate': 0.11675062233047365, 'epoch': 0.46}
+ 47%|████▋ | 242/520 [15:29<17:27, 3.77s/it] {'loss': 1.3173, 'grad_norm': 0.004339272753953903, 'learning_rate': 0.11613577606328068, 'epoch': 0.47}
+ 47%|████▋ | 243/520 [15:33<17:33, 3.80s/it] {'loss': 1.3053, 'grad_norm': 0.004570837114357859, 'learning_rate': 0.11552030285443515, 'epoch': 0.47}
+ 47%|████▋ | 244/520 [15:37<17:37, 3.83s/it] {'loss': 1.4508, 'grad_norm': 0.004658542508569245, 'learning_rate': 0.11490422661761744, 'epoch': 0.47}
+ 47%|████▋ | 245/520 [15:41<17:39, 3.85s/it] {'loss': 1.2974, 'grad_norm': 0.004479420312032171, 'learning_rate': 0.11428757128993801, 'epoch': 0.47}
+ 47%|████▋ | 246/520 [15:45<17:37, 3.86s/it] {'loss': 1.5766, 'grad_norm': 0.005234277377259361, 'learning_rate': 0.11367036083100734, 'epoch': 0.47}
+ 48%|████▊ | 247/520 [15:49<17:38, 3.88s/it] {'loss': 1.4805, 'grad_norm': 0.0046772995295185265, 'learning_rate': 0.11305261922200519, 'epoch': 0.47}
+ 48%|████▊ | 248/520 [15:52<17:34, 3.88s/it] {'loss': 1.2943, 'grad_norm': 0.004558716445990644, 'learning_rate': 0.11243437046474854, 'epoch': 0.48}
+ 48%|████▊ | 249/520 [15:56<17:18, 3.83s/it] {'loss': 1.4099, 'grad_norm': 0.004733970864736967, 'learning_rate': 0.1118156385807593, 'epoch': 0.48}
+ 48%|████▊ | 250/520 [16:00<17:07, 3.80s/it] {'loss': 1.3523, 'grad_norm': 0.005201482626681937, 'learning_rate': 0.11119644761033079, 'epoch': 0.48}
+ 48%|████▊ | 251/520 [16:04<16:56, 3.78s/it] {'loss': 1.4086, 'grad_norm': 0.004217293864830822, 'learning_rate': 0.1105768216115938, 'epoch': 0.48}
+ 48%|████▊ | 252/520 [16:07<16:51, 3.77s/it] {'loss': 1.4453, 'grad_norm': 0.004484485055884972, 'learning_rate': 0.10995678465958168, 'epoch': 0.48}
+ 49%|████▊ | 253/520 [16:11<16:42, 3.75s/it] {'loss': 1.4159, 'grad_norm': 0.005284095362391107, 'learning_rate': 0.10933636084529506, 'epoch': 0.49}
+ 49%|████▉ | 254/520 [16:15<16:33, 3.74s/it] {'loss': 1.3146, 'grad_norm': 0.004177637396744659, 'learning_rate': 0.10871557427476584, 'epoch': 0.49}
+ 49%|████▉ | 255/520 [16:19<16:27, 3.73s/it] {'loss': 1.3216, 'grad_norm': 0.004504780392035137, 'learning_rate': 0.10809444906812034, 'epoch': 0.49}
+ 49%|████▉ | 256/520 [16:22<16:21, 3.72s/it] {'loss': 1.3758, 'grad_norm': 0.0051118648959747074, 'learning_rate': 0.10747300935864244, 'epoch': 0.49}
+ 49%|████▉ | 257/520 [16:26<16:23, 3.74s/it] {'loss': 1.3704, 'grad_norm': 0.004531506078871795, 'learning_rate': 0.10685127929183567, 'epoch': 0.49}
+ 50%|████▉ | 258/520 [16:30<16:15, 3.72s/it] {'loss': 1.3894, 'grad_norm': 0.004017405492179728, 'learning_rate': 0.10622928302448523, 'epoch': 0.5}
+ 50%|████▉ | 259/520 [16:33<16:08, 3.71s/it] {'loss': 1.4528, 'grad_norm': 0.004964353202781076, 'learning_rate': 0.10560704472371918, 'epoch': 0.5}
+ 50%|█████ | 260/520 [16:37<16:05, 3.71s/it] {'loss': 1.5329, 'grad_norm': 0.004670681388925476, 'learning_rate': 0.10498458856606972, 'epoch': 0.5}
+ 50%|█████ | 261/520 [16:41<15:59, 3.70s/it] {'loss': 1.4542, 'grad_norm': 0.0050642250648769305, 'learning_rate': 0.10436193873653361, 'epoch': 0.5}
+ 50%|█████ | 262/520 [16:44<15:53, 3.70s/it] {'loss': 1.2927, 'grad_norm': 0.004617953147526421, 'learning_rate': 0.10373911942763259, 'epoch': 0.5}
+ 51%|█████ | 263/520 [16:48<15:48, 3.69s/it] {'loss': 1.474, 'grad_norm': 0.004695272669445723, 'learning_rate': 0.10311615483847332, 'epoch': 0.51}
+ 51%|█████ | 264/520 [16:52<15:46, 3.70s/it] {'loss': 1.4137, 'grad_norm': 0.004473587843509257, 'learning_rate': 0.1024930691738073, 'epoch': 0.51}
+ 51%|█████ | 265/520 [16:56<15:42, 3.70s/it] {'loss': 1.3037, 'grad_norm': 0.005822661060666409, 'learning_rate': 0.10186988664309023, 'epoch': 0.51}
+ 51%|█████ | 266/520 [16:59<15:40, 3.70s/it] {'loss': 1.1526, 'grad_norm': 0.004187251530981966, 'learning_rate': 0.10124663145954152, 'epoch': 0.51}
+ 51%|█████▏ | 267/520 [17:03<15:36, 3.70s/it] {'loss': 1.2966, 'grad_norm': 0.004804538378549097, 'learning_rate': 0.10062332783920336, 'epoch': 0.51}
+ 52%|█████▏ | 268/520 [17:07<15:34, 3.71s/it] {'loss': 1.6078, 'grad_norm': 0.006262056157779803, 'learning_rate': 0.1, 'epoch': 0.52}
+ 52%|█████▏ | 269/520 [17:10<15:29, 3.70s/it] {'loss': 1.4191, 'grad_norm': 0.004774682383252235, 'learning_rate': 0.09937667216079665, 'epoch': 0.52}
+ 52%|█████▏ | 270/520 [17:14<15:26, 3.70s/it] {'loss': 1.3825, 'grad_norm': 0.004808923116387663, 'learning_rate': 0.0987533685404585, 'epoch': 0.52}
+ 52%|█████▏ | 271/520 [17:18<15:22, 3.70s/it] {'loss': 1.4029, 'grad_norm': 0.004894535714415266, 'learning_rate': 0.0981301133569098, 'epoch': 0.52}
+ 52%|█████▏ | 272/520 [17:21<15:18, 3.70s/it] {'loss': 1.3866, 'grad_norm': 0.005387912980950067, 'learning_rate': 0.09750693082619273, 'epoch': 0.52}
+ 52%|█████▎ | 273/520 [17:25<15:11, 3.69s/it] {'loss': 1.5346, 'grad_norm': 0.006481968571450978, 'learning_rate': 0.0968838451615267, 'epoch': 0.53}
+ 53%|█████▎ | 274/520 [17:29<15:09, 3.70s/it] {'loss': 1.3641, 'grad_norm': 0.004834494497453751, 'learning_rate': 0.09626088057236745, 'epoch': 0.53}
+ 53%|█████▎ | 275/520 [17:33<15:03, 3.69s/it] {'loss': 1.2984, 'grad_norm': 0.005718018683281443, 'learning_rate': 0.09563806126346641, 'epoch': 0.53}
+ 53%|█████▎ | 276/520 [17:36<14:58, 3.68s/it] {'loss': 1.3929, 'grad_norm': 0.00481365630095135, 'learning_rate': 0.09501541143393027, 'epoch': 0.53}
+ 53%|█████▎ | 277/520 [17:40<14:55, 3.68s/it] {'loss': 1.5046, 'grad_norm': 0.005382109774884558, 'learning_rate': 0.09439295527628082, 'epoch': 0.53}
+ 53%|█████▎ | 278/520 [17:44<14:51, 3.68s/it] {'loss': 1.2548, 'grad_norm': 0.004303338296169991, 'learning_rate': 0.0937707169755148, 'epoch': 0.53}
+ 54%|█████▎ | 279/520 [17:47<14:48, 3.69s/it] {'loss': 1.4099, 'grad_norm': 0.005496595830858098, 'learning_rate': 0.09314872070816434, 'epoch': 0.54}
+ 54%|█████▍ | 280/520 [17:51<14:45, 3.69s/it] {'loss': 1.3058, 'grad_norm': 0.0051771361715363725, 'learning_rate': 0.09252699064135758, 'epoch': 0.54}
+ 54%|█████▍ | 281/520 [17:55<14:39, 3.68s/it] {'loss': 1.4137, 'grad_norm': 0.005007898252248516, 'learning_rate': 0.09190555093187967, 'epoch': 0.54}
+ 54%|█████▍ | 282/520 [17:58<14:34, 3.68s/it] {'loss': 1.2621, 'grad_norm': 0.0042598479874860744, 'learning_rate': 0.09128442572523418, 'epoch': 0.54}
+ 54%|█████▍ | 283/520 [18:02<14:32, 3.68s/it] {'loss': 1.4423, 'grad_norm': 0.005321336295024442, 'learning_rate': 0.09066363915470495, 'epoch': 0.54}
0.54} + 54%|█████▍ | 283/520 [18:02<14:32, 3.68s/it] 55%|█████▍ | 284/520 [18:06<14:30, 3.69s/it] {'loss': 1.3693, 'grad_norm': 0.0053128844057651805, 'learning_rate': 0.09004321534041836, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:06<14:30, 3.69s/it] 55%|█████▍ | 285/520 [18:09<14:25, 3.68s/it] {'loss': 1.298, 'grad_norm': 0.004862766490613294, 'learning_rate': 0.08942317838840624, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:09<14:25, 3.68s/it] 55%|█████▌ | 286/520 [18:13<14:17, 3.67s/it] {'loss': 1.1546, 'grad_norm': 0.004965946785664269, 'learning_rate': 0.08880355238966922, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:13<14:17, 3.67s/it] 55%|█████▌ | 287/520 [18:17<14:12, 3.66s/it] {'loss': 1.4134, 'grad_norm': 0.0051894389355605286, 'learning_rate': 0.08818436141924073, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:17<14:12, 3.66s/it] 55%|█████▌ | 288/520 [18:20<14:08, 3.66s/it] {'loss': 1.4599, 'grad_norm': 0.004788791722667379, 'learning_rate': 0.08756562953525152, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:20<14:08, 3.66s/it] 56%|█████▌ | 289/520 [18:24<14:06, 3.66s/it] {'loss': 1.3055, 'grad_norm': 0.004228093138117983, 'learning_rate': 0.08694738077799487, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:24<14:06, 3.66s/it] 56%|█████▌ | 290/520 [18:28<14:02, 3.66s/it] {'loss': 1.2193, 'grad_norm': 0.00416540903812051, 'learning_rate': 0.08632963916899268, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:28<14:02, 3.66s/it] 56%|█████▌ | 291/520 [18:31<14:00, 3.67s/it] {'loss': 1.283, 'grad_norm': 0.004960149062287511, 'learning_rate': 0.08571242871006202, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:31<14:00, 3.67s/it] 56%|█████▌ | 292/520 [18:35<13:57, 3.67s/it] {'loss': 1.3286, 'grad_norm': 0.004235594106439168, 'learning_rate': 0.08509577338238256, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:35<13:57, 3.67s/it] 56%|█████▋ | 293/520 [18:39<13:52, 3.67s/it] {'loss': 1.2694, 'grad_norm': 0.0048496927217908875, 'learning_rate': 0.08447969714556484, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:39<13:52, 3.67s/it] 57%|█████▋ | 294/520 [18:42<13:51, 3.68s/it] {'loss': 1.3011, 'grad_norm': 0.00495841319532525, 'learning_rate': 0.08386422393671933, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:42<13:51, 3.68s/it] 57%|█████▋ | 295/520 [18:46<13:47, 3.68s/it] {'loss': 1.4692, 'grad_norm': 0.007534152614842384, 'learning_rate': 0.08324937766952638, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:46<13:47, 3.68s/it] 57%|█████▋ | 296/520 [18:50<13:51, 3.71s/it] {'loss': 1.2422, 'grad_norm': 0.004514943891527169, 'learning_rate': 0.08263518223330697, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:50<13:51, 3.71s/it] 57%|█████▋ | 297/520 [18:54<13:59, 3.77s/it] {'loss': 1.3813, 'grad_norm': 0.004988817440737053, 'learning_rate': 0.08202166149209474, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:54<13:59, 3.77s/it] 57%|█████▋ | 298/520 [18:58<14:03, 3.80s/it] {'loss': 1.3403, 'grad_norm': 0.004440429417168353, 'learning_rate': 0.08140883928370855, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:58<14:03, 3.80s/it] 57%|█████▊ | 299/520 [19:01<14:09, 3.84s/it] {'loss': 1.4499, 'grad_norm': 0.00463273881958505, 'learning_rate': 0.0807967394188264, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:02<14:09, 3.84s/it] 58%|█████▊ | 300/520 [19:05<14:10, 3.87s/it] {'loss': 1.4052, 'grad_norm': 0.004583956616774441, 'learning_rate': 0.08018538568006027, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:05<14:10, 3.87s/it] 58%|█████▊ | 301/520 [19:09<14:13, 3.90s/it] {'loss': 1.371, 'grad_norm': 0.005100832653053673, 'learning_rate': 0.07957480182103199, 'epoch': 0.58} + 
58%|█████▊ | 301/520 [19:09<14:13, 3.90s/it] 58%|█████▊ | 302/520 [19:13<14:10, 3.90s/it] {'loss': 1.4657, 'grad_norm': 0.004962905937767371, 'learning_rate': 0.07896501156545044, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:13<14:10, 3.90s/it] 58%|█████▊ | 303/520 [19:17<14:07, 3.91s/it] {'loss': 1.3037, 'grad_norm': 0.004831863508726481, 'learning_rate': 0.07835603860618973, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:17<14:07, 3.91s/it] 58%|█████▊ | 304/520 [19:21<14:01, 3.89s/it] {'loss': 1.3736, 'grad_norm': 0.005088769208281415, 'learning_rate': 0.07774790660436857, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:21<14:01, 3.89s/it] 59%|█████▊ | 305/520 [19:25<13:55, 3.89s/it] {'loss': 1.4248, 'grad_norm': 0.005379921107669074, 'learning_rate': 0.07714063918843106, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:25<13:55, 3.89s/it] 59%|█████▉ | 306/520 [19:29<13:51, 3.88s/it] {'loss': 1.3514, 'grad_norm': 0.004558056432454362, 'learning_rate': 0.0765342599532285, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:29<13:51, 3.88s/it] 59%|█████▉ | 307/520 [19:33<14:13, 4.01s/it] {'loss': 1.2898, 'grad_norm': 0.00452079662351938, 'learning_rate': 0.07592879245910272, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:33<14:13, 4.01s/it] 59%|█████▉ | 308/520 [19:37<14:00, 3.96s/it] {'loss': 1.4118, 'grad_norm': 0.004426335386171522, 'learning_rate': 0.07532426023097064, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:37<14:00, 3.96s/it] 59%|█████▉ | 309/520 [19:41<13:50, 3.93s/it] {'loss': 1.2745, 'grad_norm': 0.004128450545241333, 'learning_rate': 0.07472068675741024, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:41<13:50, 3.93s/it] 60%|█████▉ | 310/520 [19:45<13:42, 3.92s/it] {'loss': 1.2574, 'grad_norm': 0.004296398106539992, 'learning_rate': 0.07411809548974792, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:45<13:42, 3.92s/it] 60%|█████▉ | 311/520 [19:49<13:37, 3.91s/it] {'loss': 1.2258, 'grad_norm': 0.004352103261767984, 'learning_rate': 0.07351650984114727, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:49<13:37, 3.91s/it] 60%|██████ | 312/520 [19:53<13:32, 3.91s/it] {'loss': 1.2179, 'grad_norm': 0.004672794611660331, 'learning_rate': 0.0729159531856995, 'epoch': 0.6} + 60%|██████ | 312/520 [19:53<13:32, 3.91s/it] 60%|██████ | 313/520 [19:56<13:28, 3.91s/it] {'loss': 1.2083, 'grad_norm': 0.00412912236425507, 'learning_rate': 0.07231644885751508, 'epoch': 0.6} + 60%|██████ | 313/520 [19:56<13:28, 3.91s/it] 60%|██████ | 314/520 [20:01<13:52, 4.04s/it] {'loss': 1.2479, 'grad_norm': 0.004547805678578713, 'learning_rate': 0.07171802014981725, 'epoch': 0.6} + 60%|██████ | 314/520 [20:01<13:52, 4.04s/it] 61%|██████ | 315/520 [20:05<13:39, 4.00s/it] {'loss': 1.4669, 'grad_norm': 0.006678927151951772, 'learning_rate': 0.07112069031403703, 'epoch': 0.61} + 61%|██████ | 315/520 [20:05<13:39, 4.00s/it] 61%|██████ | 316/520 [20:09<14:00, 4.12s/it] {'loss': 1.218, 'grad_norm': 0.00509262945128505, 'learning_rate': 0.07052448255890957, 'epoch': 0.61} + 61%|██████ | 316/520 [20:09<14:00, 4.12s/it] 61%|██████ | 317/520 [20:13<13:42, 4.05s/it] {'loss': 1.2429, 'grad_norm': 0.004239350487261425, 'learning_rate': 0.0699294200495727, 'epoch': 0.61} + 61%|██████ | 317/520 [20:13<13:42, 4.05s/it] 61%|██████ | 318/520 [20:17<13:28, 4.00s/it] {'loss': 1.3756, 'grad_norm': 0.005122106860320505, 'learning_rate': 0.06933552590666658, 'epoch': 0.61} + 61%|██████ | 318/520 [20:17<13:28, 4.00s/it] 61%|██████▏ | 319/520 [20:21<13:43, 4.10s/it] {'loss': 1.2346, 'grad_norm': 0.004778083043566846, 'learning_rate': 0.06874282320543557, 'epoch': 0.61} + 61%|██████▏ | 
319/520 [20:21<13:43, 4.10s/it] 62%|██████▏ | 320/520 [20:25<13:25, 4.03s/it] {'loss': 1.173, 'grad_norm': 0.004473338262350326, 'learning_rate': 0.06815133497483157, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:25<13:25, 4.03s/it] 62%|██████▏ | 321/520 [20:29<13:15, 4.00s/it] {'loss': 1.3907, 'grad_norm': 0.004934865412696096, 'learning_rate': 0.06756108419661931, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:29<13:15, 4.00s/it] 62%|██████▏ | 322/520 [20:33<13:06, 3.97s/it] {'loss': 1.2883, 'grad_norm': 0.005176311968446879, 'learning_rate': 0.06697209380448332, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:33<13:06, 3.97s/it] 62%|██████▏ | 323/520 [20:37<12:59, 3.96s/it] {'loss': 1.378, 'grad_norm': 0.005707939537857428, 'learning_rate': 0.06638438668313694, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:37<12:59, 3.96s/it] 62%|██████▏ | 324/520 [20:41<12:57, 3.97s/it] {'loss': 1.3136, 'grad_norm': 0.005430899910424726, 'learning_rate': 0.06579798566743314, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:41<12:57, 3.97s/it] 62%|██████▎ | 325/520 [20:45<12:52, 3.96s/it] {'loss': 1.324, 'grad_norm': 0.004728212643800526, 'learning_rate': 0.06521291354147728, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:45<12:52, 3.96s/it] 63%|██████▎ | 326/520 [20:49<12:46, 3.95s/it] {'loss': 1.3044, 'grad_norm': 0.004734554065837309, 'learning_rate': 0.06462919303774187, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:49<12:46, 3.95s/it] 63%|██████▎ | 327/520 [20:53<12:36, 3.92s/it] {'loss': 1.4518, 'grad_norm': 0.00672346392354258, 'learning_rate': 0.06404684683618325, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:53<12:36, 3.92s/it] 63%|██████▎ | 328/520 [20:56<12:20, 3.86s/it] {'loss': 1.3778, 'grad_norm': 0.004787424208429177, 'learning_rate': 0.0634658975633605, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:56<12:20, 3.86s/it] 63%|██████▎ | 329/520 [21:00<12:07, 3.81s/it] {'loss': 1.2176, 'grad_norm': 0.004156282200431162, 'learning_rate': 0.06288636779155621, 'epoch': 0.63} + 63%|██████▎ | 329/520 [21:00<12:07, 3.81s/it] 63%|██████▎ | 330/520 [21:04<11:58, 3.78s/it] {'loss': 1.2939, 'grad_norm': 0.004435178638110258, 'learning_rate': 0.06230828003789948, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:04<11:58, 3.78s/it] 64%|██████▎ | 331/520 [21:07<11:51, 3.76s/it] {'loss': 1.258, 'grad_norm': 0.004517138343189912, 'learning_rate': 0.06173165676349103, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:07<11:51, 3.76s/it] 64%|██████▍ | 332/520 [21:11<11:51, 3.78s/it] {'loss': 1.4524, 'grad_norm': 0.004744946803872803, 'learning_rate': 0.06115652037253053, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:11<11:51, 3.78s/it] 64%|██████▍ | 333/520 [21:15<11:52, 3.81s/it] {'loss': 1.4334, 'grad_norm': 0.004985877956165103, 'learning_rate': 0.06058289321144608, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:15<11:52, 3.81s/it] 64%|██████▍ | 334/520 [21:19<11:46, 3.80s/it] {'loss': 1.3072, 'grad_norm': 0.0049305492131423816, 'learning_rate': 0.06001079756802592, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:19<11:46, 3.80s/it] 64%|██████▍ | 335/520 [21:23<11:37, 3.77s/it] {'loss': 1.3058, 'grad_norm': 0.004513255468676595, 'learning_rate': 0.059440255670552514, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:23<11:37, 3.77s/it] 65%|██████▍ | 336/520 [21:26<11:29, 3.75s/it] {'loss': 1.1943, 'grad_norm': 0.005483526666351493, 'learning_rate': 0.05887128968693887, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:26<11:29, 3.75s/it] 65%|██████▍ | 337/520 [21:30<11:24, 3.74s/it] {'loss': 1.19, 'grad_norm': 0.004695935517470418, 'learning_rate': 0.058303921723867225, 'epoch': 
0.65} + 65%|██████▍ | 337/520 [21:30<11:24, 3.74s/it] 65%|██████▌ | 338/520 [21:34<11:17, 3.72s/it] {'loss': 1.3204, 'grad_norm': 0.004493608338071966, 'learning_rate': 0.05773817382593008, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:34<11:17, 3.72s/it] 65%|██████▌ | 339/520 [21:37<11:12, 3.72s/it] {'loss': 1.2598, 'grad_norm': 0.004813560684781134, 'learning_rate': 0.057174067974773715, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:37<11:12, 3.72s/it] 65%|██████▌ | 340/520 [21:41<11:07, 3.71s/it] {'loss': 1.2445, 'grad_norm': 0.00429028150696847, 'learning_rate': 0.056611626088244195, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:41<11:07, 3.71s/it] 66%|██████▌ | 341/520 [21:45<11:03, 3.70s/it] {'loss': 1.2726, 'grad_norm': 0.004666638998174834, 'learning_rate': 0.056050870019535494, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:45<11:03, 3.70s/it] 66%|██████▌ | 342/520 [21:48<11:00, 3.71s/it] {'loss': 1.4162, 'grad_norm': 0.00543276918971017, 'learning_rate': 0.05549182155634076, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:48<11:00, 3.71s/it] 66%|██████▌ | 343/520 [21:52<10:55, 3.70s/it] {'loss': 1.3778, 'grad_norm': 0.004711969223232143, 'learning_rate': 0.054934502420005464, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:52<10:55, 3.70s/it] 66%|██████▌ | 344/520 [21:56<10:58, 3.74s/it] {'loss': 1.2152, 'grad_norm': 0.004378314512476279, 'learning_rate': 0.0543789342646837, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:56<10:58, 3.74s/it] 66%|██████▋ | 345/520 [22:00<10:54, 3.74s/it] {'loss': 1.3399, 'grad_norm': 0.004709805562873466, 'learning_rate': 0.05382513867649663, 'epoch': 0.66} + 66%|██████▋ | 345/520 [22:00<10:54, 3.74s/it] 67%|██████▋ | 346/520 [22:03<10:48, 3.73s/it] {'loss': 1.3701, 'grad_norm': 0.005129937636198666, 'learning_rate': 0.0532731371726938, 'epoch': 0.67} + 67%|██████▋ | 346/520 [22:03<10:48, 3.73s/it] 67%|██████▋ | 347/520 [22:07<10:51, 3.77s/it] {'loss': 1.2352, 'grad_norm': 0.004112062457506123, 'learning_rate': 0.05272295120081732, 'epoch': 0.67} + 67%|██████▋ | 347/520 [22:07<10:51, 3.77s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). 
Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:11<10:55, 3.81s/it] {'loss': 1.1972, 'grad_norm': 0.005712404221191833, 'learning_rate': 0.05217460213786821, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:11<10:55, 3.81s/it] 67%|██████▋ | 349/520 [22:15<10:46, 3.78s/it] {'loss': 1.2405, 'grad_norm': 0.004534225079875403, 'learning_rate': 0.051628111289476024, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:15<10:46, 3.78s/it] 67%|██████▋ | 350/520 [22:19<10:40, 3.77s/it] {'loss': 1.2706, 'grad_norm': 0.005103803166952982, 'learning_rate': 0.051083499889071106, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:19<10:40, 3.77s/it] 68%|██████▊ | 351/520 [22:22<10:32, 3.74s/it] {'loss': 1.1772, 'grad_norm': 0.004282809238619725, 'learning_rate': 0.05054078909705926, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:22<10:32, 3.74s/it] 68%|██████▊ | 352/520 [22:26<10:31, 3.76s/it] {'loss': 1.3079, 'grad_norm': 0.004640333154893899, 'learning_rate': 0.050000000000000024, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:26<10:31, 3.76s/it] 68%|██████▊ | 353/520 [22:30<10:29, 3.77s/it] {'loss': 1.3109, 'grad_norm': 0.004576209569456194, 'learning_rate': 0.04946115360978696, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:30<10:29, 3.77s/it] 68%|██████▊ | 354/520 [22:34<10:27, 3.78s/it] {'loss': 1.4499, 'grad_norm': 0.004538166368315754, 'learning_rate': 0.048924270862831465, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:34<10:27, 3.78s/it] 68%|██████▊ | 355/520 [22:38<10:28, 3.81s/it] {'loss': 1.2439, 'grad_norm': 0.0044131111618730045, 'learning_rate': 0.04838937261924933, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:38<10:28, 3.81s/it] 68%|██████▊ | 356/520 [22:41<10:24, 3.81s/it] {'loss': 1.2465, 'grad_norm': 0.004412563235102648, 'learning_rate': 0.0478564796620502, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:41<10:24, 3.81s/it] 69%|██████▊ | 357/520 [22:45<10:21, 3.82s/it] {'loss': 1.2599, 'grad_norm': 0.004079217954265747, 'learning_rate': 0.04732561269632993, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:45<10:21, 3.82s/it] 69%|██████▉ | 358/520 [22:49<10:13, 3.79s/it] {'loss': 1.1971, 'grad_norm': 0.004453179030586387, 'learning_rate': 0.04679679234846636, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:49<10:13, 3.79s/it] 69%|██████▉ | 359/520 [22:53<10:05, 3.76s/it] {'loss': 1.379, 'grad_norm': 0.004745872117775425, 'learning_rate': 0.046270039165317606, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:53<10:05, 3.76s/it] 69%|██████▉ | 360/520 [22:56<10:01, 3.76s/it] {'loss': 1.3923, 'grad_norm': 0.00608201008682682, 'learning_rate': 0.04574537361342407, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:56<10:01, 3.76s/it] 69%|██████▉ | 361/520 [23:00<09:54, 3.74s/it] {'loss': 1.3881, 'grad_norm': 0.004258519544172762, 'learning_rate': 0.04522281607821288, 'epoch': 0.69} + 69%|██████▉ | 361/520 [23:00<09:54, 3.74s/it] 70%|██████▉ | 362/520 [23:04<09:52, 3.75s/it] {'loss': 1.2551, 'grad_norm': 0.004689489080375778, 'learning_rate': 0.04470238686320606, 'epoch': 0.7} + 70%|██████▉ | 362/520 [23:04<09:52, 3.75s/it] 70%|██████▉ | 363/520 [23:08<09:47, 3.74s/it] {'loss': 1.288, 'grad_norm': 0.004348775473093183, 'learning_rate': 0.044184106189231624, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:08<09:47, 3.74s/it] 70%|███████ | 364/520 [23:11<09:48, 3.78s/it] {'loss': 1.4, 'grad_norm': 0.004652773093482299, 'learning_rate': 0.043667994193637795, 'epoch': 0.7} + 70%|███████ | 364/520 [23:11<09:48, 3.78s/it] 70%|███████ | 365/520 [23:15<09:48, 3.80s/it] {'loss': 1.3535, 'grad_norm': 0.004740225217367755, 
'learning_rate': 0.043154070929510784, 'epoch': 0.7} + 70%|███████ | 365/520 [23:15<09:48, 3.80s/it] 70%|███████ | 366/520 [23:19<09:46, 3.81s/it] {'loss': 1.3, 'grad_norm': 0.004384169630543949, 'learning_rate': 0.04264235636489542, 'epoch': 0.7} + 70%|███████ | 366/520 [23:19<09:46, 3.81s/it] 71%|███████ | 367/520 [23:23<09:48, 3.85s/it] {'loss': 1.2992, 'grad_norm': 0.004431535733908157, 'learning_rate': 0.04213287038201943, 'epoch': 0.71} + 71%|███████ | 367/520 [23:23<09:48, 3.85s/it] 71%|███████ | 368/520 [23:27<09:44, 3.85s/it] {'loss': 1.1503, 'grad_norm': 0.004745034650707532, 'learning_rate': 0.04162563277652104, 'epoch': 0.71} + 71%|███████ | 368/520 [23:27<09:44, 3.85s/it] 71%|███████ | 369/520 [23:31<09:44, 3.87s/it] {'loss': 1.3601, 'grad_norm': 0.005120834510155285, 'learning_rate': 0.04112066325667954, 'epoch': 0.71} + 71%|███████ | 369/520 [23:31<09:44, 3.87s/it] 71%|███████ | 370/520 [23:35<09:34, 3.83s/it] {'loss': 1.2147, 'grad_norm': 0.004189056253841069, 'learning_rate': 0.04061798144264986, 'epoch': 0.71} + 71%|███████ | 370/520 [23:35<09:34, 3.83s/it] 71%|███████▏ | 371/520 [23:38<09:24, 3.79s/it] {'loss': 1.2114, 'grad_norm': 0.004563555232515559, 'learning_rate': 0.04011760686569998, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:38<09:24, 3.79s/it] 72%|███████▏ | 372/520 [23:42<09:22, 3.80s/it] {'loss': 1.4556, 'grad_norm': 0.0044482440975225695, 'learning_rate': 0.03961955896745224, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:42<09:22, 3.80s/it] 72%|███████▏ | 373/520 [23:46<09:23, 3.84s/it] {'loss': 1.3319, 'grad_norm': 0.005109726040568238, 'learning_rate': 0.03912385709912794, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:46<09:23, 3.84s/it] 72%|███████▏ | 374/520 [23:50<09:22, 3.85s/it] {'loss': 1.2907, 'grad_norm': 0.004277771853291253, 'learning_rate': 0.038630520520795276, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:50<09:22, 3.85s/it] 72%|███████▏ | 375/520 [23:54<09:20, 3.86s/it] {'loss': 1.1952, 'grad_norm': 0.004369101988802215, 'learning_rate': 0.03813956840062119, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:54<09:20, 3.86s/it] 72%|███████▏ | 376/520 [23:58<09:11, 3.83s/it] {'loss': 1.3262, 'grad_norm': 0.004409829701289282, 'learning_rate': 0.037651019814126656, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:58<09:11, 3.83s/it] 72%|███████▎ | 377/520 [24:01<09:03, 3.80s/it] {'loss': 1.26, 'grad_norm': 0.00668603738636428, 'learning_rate': 0.037164893743445275, 'epoch': 0.72} + 72%|███████▎ | 377/520 [24:01<09:03, 3.80s/it] 73%|███████▎ | 378/520 [24:05<08:55, 3.77s/it] {'loss': 1.3145, 'grad_norm': 0.004476284079639774, 'learning_rate': 0.03668120907658603, 'epoch': 0.73} + 73%|███████▎ | 378/520 [24:05<08:55, 3.77s/it] 73%|███████▎ | 379/520 [24:09<08:52, 3.78s/it] {'loss': 1.3049, 'grad_norm': 0.004219891775170544, 'learning_rate': 0.036199984606699154, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:09<08:52, 3.78s/it] 73%|███████▎ | 380/520 [24:13<08:51, 3.80s/it] {'loss': 1.4276, 'grad_norm': 0.005007970273563587, 'learning_rate': 0.035721239031346066, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:13<08:51, 3.80s/it] 73%|███████▎ | 381/520 [24:17<08:53, 3.84s/it] {'loss': 1.2929, 'grad_norm': 0.004352405223030084, 'learning_rate': 0.03524499095177297, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:17<08:53, 3.84s/it] 73%|███████▎ | 382/520 [24:20<08:52, 3.86s/it] {'loss': 1.3745, 'grad_norm': 0.005478299096208843, 'learning_rate': 0.03477125887218792, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:20<08:52, 3.86s/it] 74%|███████▎ | 383/520 [24:24<08:49, 3.87s/it] 
{'loss': 1.1306, 'grad_norm': 0.004739316732077685, 'learning_rate': 0.03430006119904196, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:24<08:49, 3.87s/it] 74%|███████▍ | 384/520 [24:28<08:43, 3.85s/it] {'loss': 1.4908, 'grad_norm': 0.005682766137380848, 'learning_rate': 0.033831416240314084, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:28<08:43, 3.85s/it] 74%|███████▍ | 385/520 [24:32<08:32, 3.80s/it] {'loss': 1.2677, 'grad_norm': 0.004068308060035739, 'learning_rate': 0.03336534220479961, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:32<08:32, 3.80s/it] 74%|███████▍ | 386/520 [24:36<08:24, 3.77s/it] {'loss': 1.2144, 'grad_norm': 0.003922098025117867, 'learning_rate': 0.032901857201403005, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:36<08:24, 3.77s/it] 74%|███████▍ | 387/520 [24:39<08:20, 3.76s/it] {'loss': 1.4555, 'grad_norm': 0.004507965523307118, 'learning_rate': 0.032440979238433976, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:39<08:20, 3.76s/it] 75%|███████▍ | 388/520 [24:43<08:15, 3.75s/it] {'loss': 1.1638, 'grad_norm': 0.004045711298233093, 'learning_rate': 0.03198272622290804, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:43<08:15, 3.75s/it] 75%|███████▍ | 389/520 [24:47<08:08, 3.73s/it] {'loss': 1.227, 'grad_norm': 0.004838529926516719, 'learning_rate': 0.03152711595985065, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:47<08:08, 3.73s/it] 75%|███████▌ | 390/520 [24:50<08:03, 3.72s/it] {'loss': 1.2848, 'grad_norm': 0.004367242245428733, 'learning_rate': 0.031074166151605298, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:50<08:03, 3.72s/it] 75%|███████▌ | 391/520 [24:54<08:00, 3.72s/it] {'loss': 1.3726, 'grad_norm': 0.004363001322677443, 'learning_rate': 0.030623894397145836, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:54<08:00, 3.72s/it] 75%|███████▌ | 392/520 [24:58<07:55, 3.72s/it] {'loss': 1.181, 'grad_norm': 0.00426873348390458, 'learning_rate': 0.03017631819139273, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:58<07:55, 3.72s/it] 76%|███████▌ | 393/520 [25:01<07:50, 3.70s/it] {'loss': 1.2451, 'grad_norm': 0.004058988328977818, 'learning_rate': 0.029731454924533086, 'epoch': 0.76} + 76%|███████▌ | 393/520 [25:01<07:50, 3.70s/it] 76%|███████▌ | 394/520 [25:05<07:44, 3.69s/it] {'loss': 1.2463, 'grad_norm': 0.004601619267983137, 'learning_rate': 0.029289321881345254, 'epoch': 0.76} + 76%|███████▌ | 394/520 [25:05<07:44, 3.69s/it] 76%|███████▌ | 395/520 [25:09<07:40, 3.68s/it] {'loss': 1.2049, 'grad_norm': 0.004896407181041623, 'learning_rate': 0.028849936240527008, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:09<07:40, 3.68s/it] 76%|███████▌ | 396/520 [25:12<07:35, 3.67s/it] {'loss': 1.2935, 'grad_norm': 0.004480923229004826, 'learning_rate': 0.028413315074028157, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:12<07:35, 3.67s/it] 76%|███████▋ | 397/520 [25:16<07:32, 3.68s/it] {'loss': 1.276, 'grad_norm': 0.0043247055832415894, 'learning_rate': 0.027979475346387363, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:16<07:32, 3.68s/it] 77%|███████▋ | 398/520 [25:20<07:29, 3.68s/it] {'loss': 1.272, 'grad_norm': 0.004595644088486073, 'learning_rate': 0.027548433914072735, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:20<07:29, 3.68s/it] 77%|███████▋ | 399/520 [25:24<07:26, 3.69s/it] {'loss': 1.2942, 'grad_norm': 0.004355425792557337, 'learning_rate': 0.027120207524827168, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:24<07:26, 3.69s/it] 77%|███████▋ | 400/520 [25:27<07:23, 3.69s/it] {'loss': 1.3697, 'grad_norm': 0.008701393450853452, 'learning_rate': 0.02669481281701739, 'epoch': 0.77} + 77%|███████▋ | 400/520 
[25:27<07:23, 3.69s/it] 77%|███████▋ | 401/520 [25:31<07:18, 3.69s/it] {'loss': 1.0912, 'grad_norm': 0.0045283072429817, 'learning_rate': 0.026272266318987603, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:31<07:18, 3.69s/it] 77%|███████▋ | 402/520 [25:35<07:14, 3.68s/it] {'loss': 1.2072, 'grad_norm': 0.00427912121685409, 'learning_rate': 0.02585258444841733, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:35<07:14, 3.68s/it] 78%|███████▊ | 403/520 [25:38<07:10, 3.68s/it] {'loss': 1.2458, 'grad_norm': 0.004679498391781269, 'learning_rate': 0.025435783511683442, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:38<07:10, 3.68s/it] 78%|███████▊ | 404/520 [25:42<07:05, 3.67s/it] {'loss': 1.1547, 'grad_norm': 0.005492830508905342, 'learning_rate': 0.02502187970322657, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:42<07:05, 3.67s/it] 78%|███████▊ | 405/520 [25:46<07:02, 3.68s/it] {'loss': 1.2998, 'grad_norm': 0.004316080079136869, 'learning_rate': 0.02461088910492202, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:46<07:02, 3.68s/it] 78%|███████▊ | 406/520 [25:49<06:59, 3.68s/it] {'loss': 1.2372, 'grad_norm': 0.005295660635205368, 'learning_rate': 0.02420282768545469, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:49<06:59, 3.68s/it] 78%|███████▊ | 407/520 [25:53<06:55, 3.67s/it] {'loss': 1.3397, 'grad_norm': 0.004561653615908103, 'learning_rate': 0.02379771129969892, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:53<06:55, 3.67s/it] 78%|███████▊ | 408/520 [25:57<06:51, 3.67s/it] {'loss': 1.2292, 'grad_norm': 0.004690644829707213, 'learning_rate': 0.023395555688102213, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:57<06:51, 3.67s/it] 79%|███████▊ | 409/520 [26:00<06:47, 3.67s/it] {'loss': 1.357, 'grad_norm': 0.004922518624601823, 'learning_rate': 0.02299637647607372, 'epoch': 0.79} + 79%|███████▊ | 409/520 [26:00<06:47, 3.67s/it] 79%|███████▉ | 410/520 [26:04<06:43, 3.67s/it] {'loss': 1.0694, 'grad_norm': 0.004088291479697301, 'learning_rate': 0.022600189173377264, 'epoch': 0.79} + 79%|███████▉ | 410/520 [26:04<06:43, 3.67s/it] 79%|███████▉ | 411/520 [26:08<06:41, 3.68s/it] {'loss': 1.3277, 'grad_norm': 0.004719501230046225, 'learning_rate': 0.022207009173528525, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:08<06:41, 3.68s/it] 79%|███████▉ | 412/520 [26:11<06:41, 3.72s/it] {'loss': 1.2432, 'grad_norm': 0.004296420277862029, 'learning_rate': 0.02181685175319702, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:11<06:41, 3.72s/it] 79%|███████▉ | 413/520 [26:15<06:42, 3.76s/it] {'loss': 1.3354, 'grad_norm': 0.0045471099412465665, 'learning_rate': 0.021429732071612653, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:15<06:42, 3.76s/it] 80%|███████▉ | 414/520 [26:19<06:41, 3.79s/it] {'loss': 1.12, 'grad_norm': 0.004143021363089221, 'learning_rate': 0.02104566516997647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:19<06:41, 3.79s/it] 80%|███████▉ | 415/520 [26:23<06:39, 3.80s/it] {'loss': 1.2183, 'grad_norm': 0.00435441650227844, 'learning_rate': 0.020664665970876496, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:23<06:39, 3.80s/it] 80%|████████ | 416/520 [26:27<06:36, 3.81s/it] {'loss': 1.1308, 'grad_norm': 0.005060493409160795, 'learning_rate': 0.020286749277707784, 'epoch': 0.8} + 80%|████████ | 416/520 [26:27<06:36, 3.81s/it] 80%|████████ | 417/520 [26:31<06:32, 3.82s/it] {'loss': 1.3076, 'grad_norm': 0.004873113183802586, 'learning_rate': 0.019911929774097215, 'epoch': 0.8} + 80%|████████ | 417/520 [26:31<06:32, 3.82s/it] 80%|████████ | 418/520 [26:35<06:29, 3.82s/it] {'loss': 1.2852, 'grad_norm': 0.0040088600948036965, 
'learning_rate': 0.019540222023333165, 'epoch': 0.8} + 80%|████████ | 418/520 [26:35<06:29, 3.82s/it] 81%|████████ | 419/520 [26:38<06:26, 3.83s/it] {'loss': 1.2756, 'grad_norm': 0.004664914825251262, 'learning_rate': 0.01917164046779948, 'epoch': 0.81} + 81%|████████ | 419/520 [26:38<06:26, 3.83s/it] 81%|████████ | 420/520 [26:42<06:22, 3.83s/it] {'loss': 1.1546, 'grad_norm': 0.004666953511422527, 'learning_rate': 0.018806199428414352, 'epoch': 0.81} + 81%|████████ | 420/520 [26:42<06:22, 3.83s/it] 81%|████████ | 421/520 [26:46<06:19, 3.83s/it] {'loss': 1.0858, 'grad_norm': 0.004600226670100959, 'learning_rate': 0.018443913104073985, 'epoch': 0.81} + 81%|████████ | 421/520 [26:46<06:19, 3.83s/it] 81%|████████ | 422/520 [26:50<06:15, 3.83s/it] {'loss': 1.2174, 'grad_norm': 0.004760233852831645, 'learning_rate': 0.01808479557110081, 'epoch': 0.81} + 81%|████████ | 422/520 [26:50<06:15, 3.83s/it] 81%|████████▏ | 423/520 [26:54<06:11, 3.83s/it] {'loss': 1.2099, 'grad_norm': 0.005187539394714687, 'learning_rate': 0.017728860782696667, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:54<06:11, 3.83s/it] 82%|████████▏ | 424/520 [26:58<06:08, 3.84s/it] {'loss': 1.4157, 'grad_norm': 0.005003381831985748, 'learning_rate': 0.017376122568400532, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:58<06:08, 3.84s/it] 82%|████████▏ | 425/520 [27:01<06:00, 3.80s/it] {'loss': 1.2072, 'grad_norm': 0.004164995818846969, 'learning_rate': 0.017026594633551252, 'epoch': 0.82} + 82%|████████▏ | 425/520 [27:01<06:00, 3.80s/it] 82%|████████▏ | 426/520 [27:05<05:53, 3.77s/it] {'loss': 1.2347, 'grad_norm': 0.006250942034986301, 'learning_rate': 0.01668029055875512, 'epoch': 0.82} + 82%|████████▏ | 426/520 [27:05<05:53, 3.77s/it] 82%|████████▏ | 427/520 [27:09<05:48, 3.74s/it] {'loss': 1.1459, 'grad_norm': 0.004492441914766432, 'learning_rate': 0.016337223799358026, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:09<05:48, 3.74s/it] 82%|████████▏ | 428/520 [27:12<05:42, 3.72s/it] {'loss': 1.1148, 'grad_norm': 0.0046650676382555805, 'learning_rate': 0.01599740768492286, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:12<05:42, 3.72s/it] 82%|████████▎ | 429/520 [27:16<05:38, 3.72s/it] {'loss': 1.2218, 'grad_norm': 0.004337700477306054, 'learning_rate': 0.015660855418711452, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:16<05:38, 3.72s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:20<05:33, 3.71s/it] {'loss': 1.2182, 'grad_norm': 0.004035156865650369, 'learning_rate': 0.015327580077171589, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:20<05:33, 3.71s/it] 83%|████████▎ | 431/520 [27:23<05:29, 3.70s/it] {'loss': 1.3049, 'grad_norm': 0.005196520413682987, 'learning_rate': 0.014997594609429088, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:23<05:29, 3.70s/it] 83%|████████▎ | 432/520 [27:27<05:25, 3.70s/it] {'loss': 1.124, 'grad_norm': 0.005027168180659252, 'learning_rate': 0.01467091183678444, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:27<05:25, 3.70s/it] 83%|████████▎ | 433/520 [27:31<05:21, 3.70s/it] {'loss': 1.2618, 'grad_norm': 0.004253091045601232, 'learning_rate': 0.014347544452214867, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:31<05:21, 3.70s/it] 83%|████████▎ | 434/520 [27:34<05:17, 3.70s/it] {'loss': 1.0063, 'grad_norm': 0.004181518309419697, 'learning_rate': 0.014027505019880971, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:34<05:17, 3.70s/it] 84%|████████▎ | 435/520 [27:38<05:13, 3.69s/it] {'loss': 1.2998, 'grad_norm': 0.004683696447422187, 'learning_rate': 0.013710805974638696, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:38<05:13, 3.69s/it] 84%|████████▍ | 436/520 [27:42<05:09, 3.69s/it] {'loss': 1.0929, 'grad_norm': 0.004501726890409121, 'learning_rate': 0.01339745962155613, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:42<05:09, 3.69s/it] 84%|████████▍ | 437/520 [27:45<05:05, 3.68s/it] {'loss': 1.3339, 'grad_norm': 0.0045644891705794045, 'learning_rate': 0.01308747813543536, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:45<05:05, 3.68s/it] 84%|████████▍ | 438/520 [27:49<05:02, 3.69s/it] {'loss': 1.126, 'grad_norm': 0.004378950803283869, 'learning_rate': 0.012780873560339467, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:49<05:02, 3.69s/it] 84%|████████▍ | 439/520 [27:53<04:58, 3.68s/it] {'loss': 1.2608, 'grad_norm': 0.004558662217890677, 'learning_rate': 0.012477657809124632, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:53<04:58, 3.68s/it] 85%|████████▍ | 440/520 [27:57<04:54, 3.68s/it] {'loss': 1.1789, 'grad_norm': 0.004337695842802675, 'learning_rate': 0.012177842662977134, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:57<04:54, 3.68s/it] 85%|████████▍ | 441/520 [28:00<04:50, 3.68s/it] {'loss': 1.3091, 'grad_norm': 0.004831296100352123, 'learning_rate': 0.01188143977095576, 'epoch': 0.85} + 85%|████████▍ | 441/520 [28:00<04:50, 3.68s/it] 85%|████████▌ | 442/520 [28:04<04:46, 3.67s/it] {'loss': 1.2355, 'grad_norm': 0.005064286102057374, 'learning_rate': 0.011588460649539035, 'epoch': 0.85} + 85%|████████▌ | 442/520 [28:04<04:46, 3.67s/it] 85%|████████▌ | 443/520 [28:08<04:42, 3.66s/it] {'loss': 1.2582, 'grad_norm': 0.004642383996329421, 'learning_rate': 0.011298916682177829, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:08<04:42, 3.66s/it] 85%|████████▌ | 444/520 [28:11<04:39, 3.67s/it] {'loss': 1.2273, 'grad_norm': 0.00402042406077818, 'learning_rate': 0.011012819118853146, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:11<04:39, 3.67s/it] 86%|████████▌ | 445/520 [28:15<04:35, 3.67s/it] {'loss': 1.1402, 'grad_norm': 0.004184554844216301, 'learning_rate': 0.01073017907563887, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:15<04:35, 3.67s/it] 86%|████████▌ | 446/520 [28:19<04:31, 3.67s/it] {'loss': 1.3694, 'grad_norm': 0.004661851592452794, 'learning_rate': 0.010451007534269908, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:19<04:31, 3.67s/it] 86%|████████▌ | 
447/520 [28:22<04:28, 3.68s/it] {'loss': 1.2381, 'grad_norm': 0.004706655387688723, 'learning_rate': 0.010175315341715598, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:22<04:28, 3.68s/it] 86%|████████▌ | 448/520 [28:26<04:24, 3.68s/it] {'loss': 1.2067, 'grad_norm': 0.004324893392198453, 'learning_rate': 0.009903113209758098, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:26<04:24, 3.68s/it] 86%|████████▋ | 449/520 [28:30<04:21, 3.68s/it] {'loss': 1.3274, 'grad_norm': 0.0045856039250796905, 'learning_rate': 0.009634411714576352, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:30<04:21, 3.68s/it] 87%|████████▋ | 450/520 [28:33<04:18, 3.69s/it] {'loss': 1.2478, 'grad_norm': 0.0043242383942948045, 'learning_rate': 0.009369221296335007, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:33<04:18, 3.69s/it] 87%|████████▋ | 451/520 [28:37<04:14, 3.69s/it] {'loss': 1.2421, 'grad_norm': 0.004535012304805949, 'learning_rate': 0.009107552258778906, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:37<04:14, 3.69s/it] 87%|████████▋ | 452/520 [28:41<04:10, 3.68s/it] {'loss': 1.3612, 'grad_norm': 0.005085496449723711, 'learning_rate': 0.008849414768832687, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:41<04:10, 3.68s/it] 87%|████████▋ | 453/520 [28:44<04:06, 3.68s/it] {'loss': 1.3487, 'grad_norm': 0.005017613825310284, 'learning_rate': 0.008594818856205699, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:44<04:06, 3.68s/it] 87%|████████▋ | 454/520 [28:48<04:02, 3.68s/it] {'loss': 1.1586, 'grad_norm': 0.004614739196162782, 'learning_rate': 0.00834377441300238, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:48<04:02, 3.68s/it] 88%|████████▊ | 455/520 [28:52<03:59, 3.68s/it] {'loss': 1.2879, 'grad_norm': 0.004380892303879435, 'learning_rate': 0.008096291193337934, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:52<03:59, 3.68s/it] 88%|████████▊ | 456/520 [28:55<03:55, 3.68s/it] {'loss': 1.2081, 'grad_norm': 0.004315101279968108, 'learning_rate': 0.007852378812959226, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:55<03:55, 3.68s/it] 88%|████████▊ | 457/520 [28:59<03:51, 3.68s/it] {'loss': 1.3141, 'grad_norm': 0.004855159427531839, 'learning_rate': 0.007612046748871327, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:59<03:51, 3.68s/it] 88%|████████▊ | 458/520 [29:03<03:48, 3.69s/it] {'loss': 1.3524, 'grad_norm': 0.004515021518213433, 'learning_rate': 0.007375304338969136, 'epoch': 0.88} + 88%|████████▊ | 458/520 [29:03<03:48, 3.69s/it] 88%|████████▊ | 459/520 [29:06<03:44, 3.69s/it] {'loss': 1.2828, 'grad_norm': 0.00415747918114905, 'learning_rate': 0.007142160781674645, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:06<03:44, 3.69s/it] 88%|████████▊ | 460/520 [29:10<03:41, 3.69s/it] {'loss': 1.1481, 'grad_norm': 0.00411328839546317, 'learning_rate': 0.006912625135579587, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:10<03:41, 3.69s/it] 89%|████████▊ | 461/520 [29:14<03:37, 3.69s/it] {'loss': 1.4069, 'grad_norm': 0.004477876330256904, 'learning_rate': 0.0066867063190933496, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:14<03:37, 3.69s/it] 89%|████████▉ | 462/520 [29:18<03:34, 3.70s/it] {'loss': 1.4192, 'grad_norm': 0.004321798194290686, 'learning_rate': 0.006464413110096601, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:18<03:34, 3.70s/it] 89%|████████▉ | 463/520 [29:21<03:33, 3.74s/it] {'loss': 1.1145, 'grad_norm': 0.004794540633006412, 'learning_rate': 0.006245754145600091, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:21<03:33, 3.74s/it] 89%|████████▉ | 464/520 [29:25<03:30, 3.76s/it] {'loss': 1.2694, 'grad_norm': 0.004571074147062263, 
'learning_rate': 0.006030737921409169, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:25<03:30, 3.76s/it] 89%|████████▉ | 465/520 [29:29<03:28, 3.78s/it] {'loss': 1.3875, 'grad_norm': 0.005043907519298059, 'learning_rate': 0.005819372791793654, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:29<03:28, 3.78s/it] 90%|████████▉ | 466/520 [29:33<03:24, 3.80s/it] {'loss': 1.2467, 'grad_norm': 0.003872673937762756, 'learning_rate': 0.005611666969163243, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:33<03:24, 3.80s/it] 90%|████████▉ | 467/520 [29:37<03:21, 3.80s/it] {'loss': 1.2912, 'grad_norm': 0.004395261824219449, 'learning_rate': 0.005407628523748398, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:37<03:21, 3.80s/it] 90%|█████████ | 468/520 [29:41<03:18, 3.81s/it] {'loss': 1.2322, 'grad_norm': 0.005016347642262827, 'learning_rate': 0.00520726538328683, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:41<03:18, 3.81s/it] 90%|█████████ | 469/520 [29:44<03:14, 3.81s/it] {'loss': 1.2823, 'grad_norm': 0.004526647984196049, 'learning_rate': 0.005010585332715401, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:44<03:14, 3.81s/it] 90%|█████████ | 470/520 [29:48<03:11, 3.82s/it] {'loss': 1.1617, 'grad_norm': 0.0038564649095095397, 'learning_rate': 0.004817596013867765, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:48<03:11, 3.82s/it] 91%|█████████ | 471/520 [29:52<03:07, 3.83s/it] {'loss': 1.1791, 'grad_norm': 0.0044612741098954, 'learning_rate': 0.004628304925177318, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:52<03:07, 3.83s/it] 91%|█████████ | 472/520 [29:56<03:03, 3.83s/it] {'loss': 1.1533, 'grad_norm': 0.004041332494310754, 'learning_rate': 0.004442719421385921, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:56<03:03, 3.83s/it] 91%|█████████ | 473/520 [30:00<03:00, 3.83s/it] {'loss': 1.2108, 'grad_norm': 0.004470410642043608, 'learning_rate': 0.004260846713258193, 'epoch': 0.91} + 91%|█████████ | 473/520 [30:00<03:00, 3.83s/it] 91%|█████████ | 474/520 [30:03<02:55, 3.82s/it] {'loss': 1.3198, 'grad_norm': 0.004168938837280947, 'learning_rate': 0.004082693867301224, 'epoch': 0.91} + 91%|█████████ | 474/520 [30:04<02:55, 3.82s/it] 91%|█████████▏| 475/520 [30:07<02:51, 3.82s/it] {'loss': 1.2367, 'grad_norm': 0.00430675910180022, 'learning_rate': 0.003908267805490051, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:07<02:51, 3.82s/it] 92%|█████████▏| 476/520 [30:11<02:46, 3.79s/it] {'loss': 1.2073, 'grad_norm': 0.004591787549217243, 'learning_rate': 0.003737575304998797, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:11<02:46, 3.79s/it] 92%|█████████▏| 477/520 [30:15<02:41, 3.76s/it] {'loss': 1.1908, 'grad_norm': 0.005311792166665677, 'learning_rate': 0.003570622997937234, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:15<02:41, 3.76s/it] 92%|█████████▏| 478/520 [30:18<02:37, 3.76s/it] {'loss': 1.1545, 'grad_norm': 0.004137385548435392, 'learning_rate': 0.00340741737109318, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:18<02:37, 3.76s/it] 92%|█████████▏| 479/520 [30:22<02:34, 3.77s/it] {'loss': 1.302, 'grad_norm': 0.004778245433618879, 'learning_rate': 0.003247964765680389, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:22<02:34, 3.77s/it] 92%|█████████▏| 480/520 [30:26<02:31, 3.78s/it] {'loss': 1.3211, 'grad_norm': 0.004871064771270001, 'learning_rate': 0.0030922713770922153, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:26<02:31, 3.78s/it] 92%|█████████▎| 481/520 [30:30<02:28, 3.80s/it] {'loss': 1.3405, 'grad_norm': 0.004573407338035518, 'learning_rate': 0.0029403432546609046, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:30<02:28, 
3.80s/it] 93%|█████████▎| 482/520 [30:34<02:24, 3.81s/it] {'loss': 1.3324, 'grad_norm': 0.004442101032666746, 'learning_rate': 0.0027921863014225504, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:34<02:24, 3.81s/it] 93%|█████████▎| 483/520 [30:38<02:20, 3.81s/it] {'loss': 1.2254, 'grad_norm': 0.004695889672850733, 'learning_rate': 0.002647806273887665, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:38<02:20, 3.81s/it] 93%|█████████▎| 484/520 [30:41<02:17, 3.81s/it] {'loss': 1.2268, 'grad_norm': 0.0043066666178514885, 'learning_rate': 0.0025072087818176383, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:41<02:17, 3.81s/it] 93%|█████████▎| 485/520 [30:45<02:13, 3.81s/it] {'loss': 1.1724, 'grad_norm': 0.004100593801868023, 'learning_rate': 0.002370399288006664, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:45<02:13, 3.81s/it] 93%|█████████▎| 486/520 [30:49<02:09, 3.82s/it] {'loss': 1.302, 'grad_norm': 0.00466166451587615, 'learning_rate': 0.0022373831080695463, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:49<02:09, 3.82s/it] 94%|█████████▎| 487/520 [30:53<02:06, 3.82s/it] {'loss': 1.1491, 'grad_norm': 0.004084285353120934, 'learning_rate': 0.0021081654102351635, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:53<02:06, 3.82s/it] 94%|█████████▍| 488/520 [30:57<02:02, 3.82s/it] {'loss': 1.0885, 'grad_norm': 0.00459556977110792, 'learning_rate': 0.0019827512151456175, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:57<02:02, 3.82s/it] 94%|█████████▍| 489/520 [31:00<01:58, 3.81s/it] {'loss': 1.3149, 'grad_norm': 0.0038641317136913237, 'learning_rate': 0.0018611453956612345, 'epoch': 0.94} + 94%|█████████▍| 489/520 [31:00<01:58, 3.81s/it] 94%|█████████▍| 490/520 [31:04<01:54, 3.81s/it] {'loss': 1.2165, 'grad_norm': 0.004320793578351859, 'learning_rate': 0.0017433526766711727, 'epoch': 0.94} + 94%|█████████▍| 490/520 [31:04<01:54, 3.81s/it] 94%|█████████▍| 491/520 [31:08<01:50, 3.83s/it] {'loss': 1.1777, 'grad_norm': 0.004446154775644677, 'learning_rate': 0.0016293776349098677, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:08<01:50, 3.83s/it] 95%|█████████▍| 492/520 [31:12<01:46, 3.82s/it] {'loss': 1.3014, 'grad_norm': 0.00442818344118658, 'learning_rate': 0.0015192246987791981, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:12<01:46, 3.82s/it] 95%|█████████▍| 493/520 [31:16<01:42, 3.81s/it] {'loss': 1.3738, 'grad_norm': 0.004951287002912545, 'learning_rate': 0.0014128981481764114, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:16<01:42, 3.81s/it] 95%|█████████▌| 494/520 [31:20<01:39, 3.83s/it] {'loss': 1.2383, 'grad_norm': 0.004111763943621405, 'learning_rate': 0.0013104021143278911, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:20<01:39, 3.83s/it] 95%|█████████▌| 495/520 [31:23<01:35, 3.83s/it] {'loss': 1.1865, 'grad_norm': 0.0041405545069394, 'learning_rate': 0.0012117405796285285, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:23<01:35, 3.83s/it] 95%|█████████▌| 496/520 [31:27<01:31, 3.83s/it] {'loss': 1.1111, 'grad_norm': 0.0042310083136302785, 'learning_rate': 0.0011169173774871477, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:27<01:31, 3.83s/it] 96%|█████████▌| 497/520 [31:31<01:27, 3.82s/it] {'loss': 1.2508, 'grad_norm': 0.004101598997297444, 'learning_rate': 0.0010259361921774012, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:31<01:27, 3.82s/it] 96%|█████████▌| 498/520 [31:35<01:24, 3.82s/it] {'loss': 1.1989, 'grad_norm': 0.004698223307730343, 'learning_rate': 0.000938800558694719, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:35<01:24, 3.82s/it] 96%|█████████▌| 499/520 [31:39<01:20, 3.82s/it] {'loss': 1.4007, 
'grad_norm': 0.0050890013279669835, 'learning_rate': 0.0008555138626189618, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:39<01:20, 3.82s/it] 96%|█████████▌| 500/520 [31:43<01:16, 3.82s/it] {'loss': 1.3146, 'grad_norm': 0.005238128505744003, 'learning_rate': 0.0007760793399827937, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:43<01:16, 3.82s/it] 96%|█████████▋| 501/520 [31:46<01:12, 3.82s/it] {'loss': 1.3127, 'grad_norm': 0.005296958925091823, 'learning_rate': 0.000700500077146038, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:46<01:12, 3.82s/it] 97%|█████████▋| 502/520 [31:50<01:08, 3.82s/it] {'loss': 1.2295, 'grad_norm': 0.004107653100408361, 'learning_rate': 0.0006287790106757397, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:50<01:08, 3.82s/it] 97%|█████████▋| 503/520 [31:54<01:04, 3.81s/it] {'loss': 1.2811, 'grad_norm': 0.004465629662603885, 'learning_rate': 0.0005609189272320237, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:54<01:04, 3.81s/it] 97%|█████████▋| 504/520 [31:58<01:01, 3.87s/it] {'loss': 1.2289, 'grad_norm': 0.0050316364907797995, 'learning_rate': 0.000496922463459859, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:58<01:01, 3.87s/it] 97%|█████████▋| 505/520 [32:02<00:58, 3.90s/it] {'loss': 1.2722, 'grad_norm': 0.004289704066627011, 'learning_rate': 0.0004367921058866187, 'epoch': 0.97} + 97%|█████████▋| 505/520 [32:02<00:58, 3.90s/it] 97%|█████████▋| 506/520 [32:06<00:54, 3.91s/it] {'loss': 1.1802, 'grad_norm': 0.004774679775192733, 'learning_rate': 0.0003805301908254455, 'epoch': 0.97} + 97%|█████████▋| 506/520 [32:06<00:54, 3.91s/it] 98%|█████████▊| 507/520 [32:10<00:50, 3.92s/it] {'loss': 1.4376, 'grad_norm': 0.004129840423064123, 'learning_rate': 0.0003281389042844918, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:10<00:50, 3.92s/it] 98%|█████████▊| 508/520 [32:14<00:47, 3.92s/it] {'loss': 1.2995, 'grad_norm': 0.0043556877030717115, 'learning_rate': 0.00027962028188198705, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:14<00:47, 3.92s/it] 98%|█████████▊| 509/520 [32:18<00:43, 3.92s/it] {'loss': 1.2704, 'grad_norm': 0.0042365623557679595, 'learning_rate': 0.00023497620876711256, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:18<00:43, 3.92s/it] 98%|█████████▊| 510/520 [32:22<00:39, 3.92s/it] {'loss': 1.23, 'grad_norm': 0.004192097236998007, 'learning_rate': 0.00019420841954681523, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:22<00:39, 3.92s/it] 98%|█████████▊| 511/520 [32:25<00:35, 3.92s/it] {'loss': 1.1991, 'grad_norm': 0.00417259702584787, 'learning_rate': 0.00015731849821833956, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:25<00:35, 3.92s/it] 98%|█████████▊| 512/520 [32:29<00:31, 3.92s/it] {'loss': 1.0817, 'grad_norm': 0.004329077983787171, 'learning_rate': 0.00012430787810776555, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:29<00:31, 3.92s/it] 99%|█████████▊| 513/520 [32:33<00:27, 3.92s/it] {'loss': 1.2841, 'grad_norm': 0.004723942413489481, 'learning_rate': 9.517784181422018e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:33<00:27, 3.92s/it] 99%|█████████▉| 514/520 [32:37<00:23, 3.93s/it] {'loss': 1.2586, 'grad_norm': 0.00394191702104146, 'learning_rate': 6.992952116013917e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:37<00:23, 3.93s/it] 99%|█████████▉| 515/520 [32:41<00:19, 3.87s/it] {'loss': 1.3093, 'grad_norm': 0.005079658277502494, 'learning_rate': 4.856389714723575e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:41<00:19, 3.87s/it] 99%|█████████▉| 516/520 [32:45<00:15, 3.82s/it] {'loss': 1.1889, 'grad_norm': 0.004442441806567359, 'learning_rate': 
3.108179991837545e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:45<00:15, 3.82s/it] 99%|█████████▉| 517/520 [32:48<00:11, 3.78s/it] {'loss': 1.3606, 'grad_norm': 0.0054119436798776045, 'learning_rate': 1.7483908725357544e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:48<00:11, 3.78s/it] 100%|█████████▉| 518/520 [32:52<00:07, 3.73s/it] {'loss': 1.2201, 'grad_norm': 0.004394723998704045, 'learning_rate': 7.770751902513862e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:52<00:07, 3.73s/it] 100%|█████████▉| 519/520 [32:56<00:03, 3.70s/it] {'loss': 1.2901, 'grad_norm': 0.004369473630177884, 'learning_rate': 1.9427068461808086e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:56<00:03, 3.70s/it] 100%|██████████| 520/520 [33:00<00:00, 3.95s/it] {'loss': 1.3766, 'grad_norm': 0.0055271264528676525, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [33:00<00:00, 3.95s/it] {'train_runtime': 1980.6775, 'train_samples_per_second': 33.589, 'train_steps_per_second': 0.263, 'train_loss': 1.474410092830658, 'epoch': 1.0} + 100%|██████████| 520/520 [33:00<00:00, 3.95s/it] 100%|██████████| 520/520 [33:00<00:00, 3.81s/it] +[2025-10-10 10:28:24,286] [INFO] [launch.py:348:main] Process 751347 exits successfully. +[2025-10-10 10:28:24,287] [INFO] [launch.py:348:main] Process 751346 exits successfully. +[2025-10-10 10:28:25,288] [INFO] [launch.py:348:main] Process 751348 exits successfully. +[2025-10-10 10:28:25,289] [INFO] [launch.py:348:main] Process 751345 exits successfully. +[2025-10-10 10:28:25,290] [INFO] [launch.py:348:main] Process 751349 exits successfully. +[2025-10-10 10:28:25,290] [INFO] [launch.py:348:main] Process 751350 exits successfully. +[2025-10-10 10:28:25,290] [INFO] [launch.py:348:main] Process 751351 exits successfully. +[2025-10-10 10:28:29,295] [INFO] [launch.py:348:main] Process 751344 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.5_2e-1_connector-1.0_1.5_2e-1_ablation_20251010_095352.log +Timestamp: 2025-10-10 10:28:31 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation_20251010_102831.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation_20251010_102831.log new file mode 100644 index 0000000000000000000000000000000000000000..6033bc5c0abe001e76bfdc4c3eef65a88f1b7bf0 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation_20251010_102831.log @@ -0,0 +1,865 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation_20251010_102831.log +Timestamp: 2025-10-10 10:28:31 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 10:28:34,454] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 10:28:37,381] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 10:28:37,383] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 1.7 --temperature_mlp_text 1.7 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 1.7 --temperature_mlp_vision 1.7 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 1.7 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
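Note: the launcher command above requests --lr_scheduler_type cosine with --warmup_ratio 0.03 and --learning_rate 2e-1 over 520 optimizer steps. The learning_rate values in the step logs above are consistent with HF Transformers' linear-warmup-plus-cosine-decay schedule; the sketch below (our own helper, not part of the training code) reproduces them:

import math

def lr_at_step(step, total_steps=520, warmup_ratio=0.03, base_lr=2e-1):
    # Linear warmup for ceil(warmup_ratio * total_steps) steps, then cosine
    # decay to zero -- the behavior of get_cosine_schedule_with_warmup as
    # instantiated by the HF Trainer. Function name is illustrative only.
    warmup_steps = math.ceil(warmup_ratio * total_steps)  # ceil(15.6) = 16
    if step < warmup_steps:
        return base_lr * step / max(1, warmup_steps)
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

# Spot checks against the logs: lr_at_step(268) ~= 0.1 (progress exactly 1/2),
# lr_at_step(352) ~= 0.05 (progress 2/3), lr_at_step(520) == 0.0.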
+ import pynvml # type: ignore[import] +[2025-10-10 10:28:39,957] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 10:28:41,022] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 10:28:41,022] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 10:28:41,022] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 10:28:41,022] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 10:28:41,022] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 10:28:41,022] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 10:28:41,023] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 10:28:41,025] [INFO] [launch.py:253:main] process 773623 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 10:28:41,027] [INFO]
[launch.py:253:main] process 773624 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 10:28:41,029] [INFO] [launch.py:253:main] process 773625 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 10:28:41,031] [INFO] [launch.py:253:main] process 773626 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 10:28:41,033] [INFO] [launch.py:253:main] process 773627 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 10:28:41,034] [INFO] [launch.py:253:main] process 773628 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 10:28:41,036] [INFO] [launch.py:253:main] process 773629 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 10:28:41,038] [INFO] [launch.py:253:main] process 773630 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 10:28:47,648] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:28:47,860] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:28:47,876] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:28:47,920] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:28:47,966] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:28:47,967] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:28:47,972] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:28:47,982] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:28:48,050] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:28:48,264] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:28:48,280] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:28:48,322] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:28:48,322] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 10:28:48,367] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:28:48,368] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:28:48,387] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:28:48,390] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.7, 'temperature_mlp': 1.7, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.7, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.7,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.7,
+    "temperature_mlp": 1.7,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly.
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test2-worker-0:773623:773623 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:773623:773623 [0] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:773623:773623 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:773623:773623 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:773623:773623 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:773623:773623 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test2-worker-0:773625:773625 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:773625:773625 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:773625:773625 [2] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:773630:773630 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:773630:773630 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:773625:773625 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:773625:773625 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:773625:773625 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:773630:773630 [7] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:773630:773630 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:773630:773630 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:773630:773630 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:773629:773629 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test2-worker-0:773629:773629 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test2-worker-0:773629:773629 [6] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0>
+ywang29-vrdb-test2-worker-0:773629:773629 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test2-worker-0:773629:773629 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test2-worker-0:773629:773629 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test2-worker-0:773628:773628 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:773628:773628 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773628:773628 [5] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773626:773626 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:773628:773628 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:773626:773626 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773628:773628 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:773628:773628 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:773626:773626 [3] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773626:773626 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:773626:773626 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:773626:773626 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:773624:773624 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:773624:773624 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773624:773624 [1] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773624:773624 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:773624:773624 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:773624:773624 [1] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test2-worker-0:773627:773627 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test2-worker-0:773627:773627 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773627:773627 [4] NCCL INFO Bootstrap : Using eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773627:773627 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test2-worker-0:773627:773627 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test2-worker-0:773627:773627 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.152.48<0> +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO Using network Socket +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO ncclCommInitRank comm 0x55b10e6da9f0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x2dee143de15bd501 - Init START +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO ncclCommInitRank comm 0x55c341cac950 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x2dee143de15bd501 - Init START +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO ncclCommInitRank comm 0x55ff2e30acc0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x2dee143de15bd501 - Init START +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO ncclCommInitRank comm 0x560b552e6ec0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x2dee143de15bd501 - Init START +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO ncclCommInitRank comm 0x555ad9f49160 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x2dee143de15bd501 - Init START +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO ncclCommInitRank comm 0x55eacda0fc20 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x2dee143de15bd501 - Init START +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO ncclCommInitRank comm 0x557c73508d50 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x2dee143de15bd501 - Init START +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO ncclCommInitRank comm 0x55cc0c66c450 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x2dee143de15bd501 - Init START +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO NVLS multicast support 
is not available on dev 6 +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO comm 0x557c73508d50 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO comm 0x55c341cac950 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO comm 0x55ff2e30acc0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO comm 0x55eacda0fc20 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO comm 0x55b10e6da9f0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO comm 0x55cc0c66c450 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO comm 0x560b552e6ec0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO comm 0x555ad9f49160 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO Trees 
[0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO P2P Chunksize set to 524288 
+ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+[Channels 15/24 through 23/24 report the same ring order 0 1 2 3 4 5 6 7]
+ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO P2P Chunksize set to 524288
+[Ring setup: on each of channels 00/0 through 23/0, every rank r connects r[r] -> ((r+1) mod 8)[(r+1) mod 8] via P2P/CUMEM/read, one NCCL INFO line per rank per channel]
+[Ranks 0-7 each report "Connected all rings"]
+[Tree setup: on each of channels 00/0 through 23/0, ranks 1-7 connect r[r] -> (r-1)[r-1] via P2P/CUMEM/read]
+ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[Ranks 1-7 report the same "Connected all trees", threadThresholds, and channel counts]
+[Each rank logs "TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so" and falls back to "TUNER/Plugin: Using internal tuner plugin."]
+ywang29-vrdb-test2-worker-0:773628:775247 [5] NCCL INFO ncclCommInitRank comm 0x555ad9f49160 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x2dee143de15bd501 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:773629:775246 [6] NCCL INFO ncclCommInitRank comm 0x557c73508d50 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x2dee143de15bd501 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:773627:775250 [4] NCCL INFO ncclCommInitRank comm 0x55b10e6da9f0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x2dee143de15bd501 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:773630:775244 [7] NCCL INFO ncclCommInitRank comm 0x55c341cac950 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x2dee143de15bd501 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:773626:775248 [3] NCCL INFO ncclCommInitRank comm 0x560b552e6ec0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x2dee143de15bd501 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:773624:775249 [1] NCCL INFO ncclCommInitRank comm 0x55ff2e30acc0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x2dee143de15bd501 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:773623:775243 [0] NCCL INFO ncclCommInitRank comm 0x55cc0c66c450 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x2dee143de15bd501 - Init COMPLETE
+ywang29-vrdb-test2-worker-0:773625:775245 [2] NCCL INFO ncclCommInitRank comm 0x55eacda0fc20 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x2dee143de15bd501 - Init COMPLETE
+[2025-10-10 10:29:31,214] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: [the per-layer mask parameters 'model.layers.{0-23}.self_attn.{q,k,v,o}_proj.scores' and 'model.layers.{0-23}.mlp.{gate,up,down}_proj.scores']
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 
'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 
'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
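The "newly initialized" warning above is expected for a masktune run: the *.scores tensors are the learnable soft-mask logits that the recipe attaches to every attention and MLP projection of the language model, so they cannot be loaded from the pretrained checkpoint and instead start from the configured init mean. As a rough sketch only (assuming a sigmoid soft mask driven by the --init_mean_* and --temperature_* flags in the launch command; the actual TinyLLaVA/masktune layer may differ), such a masked projection could look like:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftMaskedLinear(nn.Linear):
        """Hypothetical sketch of a projection with learnable soft-mask scores."""

        def __init__(self, in_features, out_features, init_mean=1.0, temperature=0.3, **kwargs):
            super().__init__(in_features, out_features, **kwargs)
            # One logit per weight entry; this is the kind of tensor reported as
            # 'model.layers.N.<proj>.scores' in the warning above.
            self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
            self.temperature = temperature

        def forward(self, x):
            # Soft mask in (0, 1); a lower temperature pushes it toward hard 0/1 gating.
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)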
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[E ProcessGroupNCCL.cpp:474] [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800516 milliseconds before timing out.
+[E ProcessGroupNCCL.cpp:474] [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800575 milliseconds before timing out.
+[E ProcessGroupNCCL.cpp:474] [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800634 milliseconds before timing out.
+[E ProcessGroupNCCL.cpp:474] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800621 milliseconds before timing out.
+[E ProcessGroupNCCL.cpp:474] [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800744 milliseconds before timing out.
+ywang29-vrdb-test2-worker-0:773630:775253 [7] NCCL INFO [Service thread] Connection closed by localRank 7
+ywang29-vrdb-test2-worker-0:773630:774742 [7] NCCL INFO comm 0x55c341cac950 rank 7 nranks 8 cudaDev 7 busId a01d0 - Abort COMPLETE
+[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
+[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down.
+[E ProcessGroupNCCL.cpp:915] [Rank 7] NCCL watchdog thread terminated with exception: [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800516 milliseconds before timing out.
+terminate called after throwing an instance of 'std::runtime_error'
+ what(): [Rank 7] NCCL watchdog thread terminated with exception: [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800516 milliseconds before timing out.
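The watchdog errors above mean that at least one rank stopped participating in a pending BROADCAST for longer than the default 30-minute NCCL timeout (Timeout(ms)=1800000), after which every surviving rank aborts and the whole job is torn down. If the stall is slow storage or dataloading rather than a genuine hang, one common mitigation (standard torch.distributed API, not taken from this repo) is to raise the process-group timeout at initialization:

    import datetime
    import torch.distributed as dist

    # Raise the collective timeout from the default 30 minutes so that one
    # slow rank (e.g. reading from NFS) does not abort the whole job.
    # This must be done identically on every rank.
    dist.init_process_group(backend="nccl", timeout=datetime.timedelta(hours=2))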
+ywang29-vrdb-test2-worker-0:773624:775251 [1] NCCL INFO [Service thread] Connection closed by localRank 1
+ywang29-vrdb-test2-worker-0:773625:775261 [2] NCCL INFO [Service thread] Connection closed by localRank 2
+ywang29-vrdb-test2-worker-0:773627:775254 [4] NCCL INFO [Service thread] Connection closed by localRank 4
+ywang29-vrdb-test2-worker-0:773628:775263 [5] NCCL INFO [Service thread] Connection closed by localRank 5
+[2025-10-10 16:20:44,230] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+ywang29-vrdb-test2-worker-0:773624:774724 [1] NCCL INFO comm 0x55ff2e30acc0 rank 1 nranks 8 cudaDev 1 busId 101d0 - Abort COMPLETE
+[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
+[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down.
+[E ProcessGroupNCCL.cpp:915] [Rank 1] NCCL watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800621 milliseconds before timing out.
+terminate called after throwing an instance of 'std::runtime_error'
+ what(): [Rank 1] NCCL watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800621 milliseconds before timing out.
+ywang29-vrdb-test2-worker-0:773625:774723 [2] NCCL INFO comm 0x55eacda0fc20 rank 2 nranks 8 cudaDev 2 busId 201c0 - Abort COMPLETE
+[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
+[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down.
+[E ProcessGroupNCCL.cpp:915] [Rank 2] NCCL watchdog thread terminated with exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800575 milliseconds before timing out.
+terminate called after throwing an instance of 'std::runtime_error'
+ what(): [Rank 2] NCCL watchdog thread terminated with exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800575 milliseconds before timing out.
+ywang29-vrdb-test2-worker-0:773628:774728 [5] NCCL INFO comm 0x555ad9f49160 rank 5 nranks 8 cudaDev 5 busId 901d0 - Abort COMPLETE
+[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
+[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down.
+[E ProcessGroupNCCL.cpp:915] [Rank 5] NCCL watchdog thread terminated with exception: [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800634 milliseconds before timing out. +terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 5] NCCL watchdog thread terminated with exception: [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800634 milliseconds before timing out. +ywang29-vrdb-test2-worker-0:773627:774727 [4] NCCL INFO comm 0x55b10e6da9f0 rank 4 nranks 8 cudaDev 4 busId 901c0 - Abort COMPLETE +[E ProcessGroupNCCL.cpp:488] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data. +[E ProcessGroupNCCL.cpp:494] To avoid data inconsistency, we are taking the entire process down. +[E ProcessGroupNCCL.cpp:915] [Rank 4] NCCL watchdog thread terminated with exception: [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800744 milliseconds before timing out. +terminate called after throwing an instance of 'std::runtime_error' + what(): [Rank 4] NCCL watchdog thread terminated with exception: [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=1858, OpType=BROADCAST, NumelIn=677376, NumelOut=677376, Timeout(ms)=1800000) ran for 1800744 milliseconds before timing out. +[2025-10-10 16:20:55,467] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 773623 +[2025-10-10 16:20:55,845] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 773624 +[2025-10-10 16:20:59,223] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 773625 +[2025-10-10 16:20:59,225] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 773626 +[2025-10-10 16:20:59,641] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 773627 +[2025-10-10 16:20:59,735] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 773628 +[2025-10-10 16:21:00,392] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 773629 +[2025-10-10 16:21:00,771] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 773630 +[2025-10-10 16:21:00,771] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', 
'/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.7', '--temperature_mlp_text', '1.7', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.7', '--temperature_mlp_vision', '1.7', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.7', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = -6 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.7_2e-1_connector-1.0_1.7_2e-1_ablation_20251010_102831.log +Timestamp: 2025-10-10 16:21:02 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation_20251010_162102.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation_20251010_162102.log new file mode 100644 index 0000000000000000000000000000000000000000..fa1d56f926e63c0fd60ced6bb488fec8c4afd6a6 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation_20251010_162102.log @@ -0,0 +1,169 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation_20251010_162102.log +Timestamp: 2025-10-10 16:21:02 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 16:21:04,987] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 16:21:08,361] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. 
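These logs are consecutive runs of the same masktune configuration with only the mask temperature changed (0.3, then 1.7 above, now 1.9), and the previous run ended with "exits with return code = -6", i.e. the launcher subprocess was killed by SIGABRT, matching the "terminate called" messages from the NCCL watchdog. A hypothetical sweep driver that launches such runs and decodes that status might look like this (the wrapper script name and its argument are assumptions, not from this repo):

    import signal
    import subprocess

    for temp in ["0.3", "1.7", "1.9"]:
        # Hypothetical wrapper that templates the full deepspeed command
        # seen in the logs, substituting the temperature everywhere.
        rc = subprocess.run(["bash", "scripts/train_masktune.sh", temp]).returncode
        if rc < 0:
            # A negative return code means the process died from a signal;
            # -6 decodes to SIGABRT.
            print(f"temperature {temp}: killed by {signal.Signals(-rc).name}")
        else:
            print(f"temperature {temp}: exit code {rc}")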
+[2025-10-10 16:21:08,363] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 2e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 1.0 --temperature_attn_text 1.9 --temperature_mlp_text 1.9 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 1.0 --temperature_attn_vision 1.9 --temperature_mlp_vision 1.9 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 1.0 --temperature_connector 1.9 --backward_type_connector normal --mm_projector_lr 2e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import]
+[2025-10-10 16:21:10,949] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 16:21:12,019] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 16:21:12,019] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 16:21:12,020] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 16:21:12,020] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 16:21:12,020] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 16:21:12,020] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 16:21:12,020] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 16:21:12,022] [INFO] [launch.py:253:main] process 780015 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 16:21:12,024] [INFO]
[launch.py:253:main] process 780016 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 16:21:12,026] [INFO] [launch.py:253:main] process 780017 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 16:21:12,028] [INFO] [launch.py:253:main] process 780018 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 16:21:12,030] [INFO] [launch.py:253:main] process 780019 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 16:21:12,032] [INFO] [launch.py:253:main] process 780020 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 16:21:12,034] [INFO] [launch.py:253:main] process 780021 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 16:21:12,036] [INFO] [launch.py:253:main] process 780022 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-1.0_1.9_2e-1_connector-1.0_1.9_2e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '2e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '1.0', '--temperature_attn_text', '1.9', '--temperature_mlp_text', '1.9', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '1.0', '--temperature_attn_vision', '1.9', '--temperature_mlp_vision', '1.9', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '1.0', '--temperature_connector', '1.9', '--backward_type_connector', 'normal', '--mm_projector_lr', '2e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
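The --world_info blob in the cmd above is nothing exotic: it is base64-encoded JSON mapping each host to its local GPU ranks. A minimal decode check (plain Python, no project code assumed):

```python
import base64
import json

# value copied verbatim from the --world_info flag in the launcher cmd above
world_info_b64 = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"

world_info = json.loads(base64.b64decode(world_info_b64))
print(world_info)  # {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
```

The decoded value matches the WORLD INFO DICT line that launch.py logs: a single node with eight local ranks.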
+[2025-10-10 16:21:18,873-19,230] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) (logged once per rank, eight times)
+[2025-10-10 16:21:19,397-19,633] [INFO] [comm.py:637:init_distributed] cdb=None (logged once per rank, eight times)
+[2025-10-10 16:21:19,589] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector'] (printed once per rank, eight times)
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( (this FutureWarning is likewise emitted once per rank)
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 1.9, 'temperature_mlp': 1.9, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 1.9, 'mask_type': 'soft', 'backward_type': 'normal'}}
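The dict above pins down the masking setup for this run: mask_type 'soft' with temperature_attn = temperature_mlp = temperature_connector = 1.9, plus init_mean 1.0 from the launcher flags. The repository's actual mask module never appears in the log, so the following is only a minimal sketch of what a temperature-scaled soft gate with these knobs typically looks like; the class, its sigmoid parameterization, and all names are illustrative assumptions, not TinyLLaVA code.

```python
import torch
import torch.nn as nn

class SoftMask(nn.Module):
    """Hypothetical soft gate: one learnable score per masked unit."""

    def __init__(self, num_units: int, init_mean: float = 1.0, temperature: float = 1.9):
        super().__init__()
        # --init_mean_* seeds the mask scores; --temperature_* scales the sigmoid
        self.scores = nn.Parameter(torch.full((num_units,), init_mean))
        self.temperature = temperature

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # a soft, differentiable mask in (0, 1); lower T pushes gates toward hard 0/1
        gate = torch.sigmoid(self.scores / self.temperature)
        return x * gate

# With init_mean=1.0 and T=1.9 every gate starts at sigmoid(1/1.9) ~ 0.63,
# i.e. training begins with all units mostly "on" and sparsifies from there.
print(SoftMask(16)(torch.randn(2, 16)).shape)  # torch.Size([2, 16])
```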
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 1.9,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 1.9,
+    "temperature_mlp": 1.9,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_053812.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_053812.log
new file mode 100644
index 0000000000000000000000000000000000000000..e503558d919a16e0adfc65980813073bcf46ee83
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_053812.log
@@ -0,0 +1,1989 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_053812.log
+Timestamp: 2025-10-10 05:38:12
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 05:38:15,166] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:38:18,425] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-10 05:38:18,427] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
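Note how the run name encodes the swept hyperparameters: text-3.0_0.5_1 here lines up with --init_mean_text 3.0, --temperature_attn_text/--temperature_mlp_text 0.5 and --learning_rate 1, just as text-1.0_1.9_2e-1 in the previous log lined up with 1.0 / 1.9 / 2e-1. A small decoder for that convention (the pattern is inferred from these two logs, so treat it as an assumption):

```python
import re

def parse_run_name(name: str) -> dict:
    """Recover (init_mean, temperature, lr) for the text and connector masks."""
    m = re.search(
        r"text-([^_]+)_([^_]+)_([^_]+)_connector-([^_]+)_([^_]+)_([^_]+)_ablation",
        name,
    )
    if m is None:
        raise ValueError(f"unrecognized run name: {name}")
    keys = ("init_mean", "temperature", "lr")
    return {"text": dict(zip(keys, m.groups()[:3])),
            "connector": dict(zip(keys, m.groups()[3:]))}

run = "qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation"
print(parse_run_name(run))
# {'text': {'init_mean': '3.0', 'temperature': '0.5', 'lr': '1'},
#  'connector': {'init_mean': '3.0', 'temperature': '0.5', 'lr': '1'}}
```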
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 05:38:21,060] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:38:22,079] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 05:38:22,079] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 05:38:22,079] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 05:38:22,079] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 05:38:22,079] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 05:38:22,079] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 05:38:22,079] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 05:38:22,082-22,096] [INFO] [launch.py:253:main] processes 1761915-1761922 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=<rank>', '--deepspeed', './scripts/zero3.json', ...] (each of the eight ranks receives the argument list from the runner cmd above verbatim, differing only in --local_rank=0 through --local_rank=7)
+(the torch.cuda pynvml FutureWarning and its "import pynvml" line then repeat once per spawned rank, eight times in total)
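Both runs request --lr_scheduler_type cosine with --warmup_ratio 0.03; this one pairs it with the unusually large peak --learning_rate 1, which is plausible when the optimizer is updating mask scores rather than ordinary weights. As a reference for reading the loss curves later in the log, here is the shape those two flags imply, written as the standard linear-warmup-plus-cosine-decay rule; assuming HF Transformers' cosine schedule semantics is itself an assumption:

```python
import math

def lr_at(step: int, total_steps: int, peak_lr: float = 1.0, warmup_ratio: float = 0.03) -> float:
    """Linear warmup over the first warmup_ratio of steps, then cosine decay to 0."""
    warmup_steps = max(1, int(total_steps * warmup_ratio))
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

total = 1000  # placeholder; the real count depends on --train_data_ratio 0.1 and the global batch size
for s in (0, 30, 500, 1000):
    print(s, round(lr_at(s, total), 3))  # 0 -> 0.0, 30 -> 1.0 (end of warmup), 500 -> 0.524, 1000 -> 0.0
```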
+ import pynvml # type: ignore[import] +[2025-10-10 05:38:28,757] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:38:28,815] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:38:28,844] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:38:28,988] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:38:28,990] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:38:29,004] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:38:29,006] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:38:29,011] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:38:29,283] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:38:29,283] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 05:38:29,283] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:38:29,283] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:38:29,405] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:38:29,412] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:38:29,421] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:38:29,422] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:38:29,424] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:1761915:1761915 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761915:1761915 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761915:1761915 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1761915:1761915 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1761915:1761915 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1761915:1761915 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1761919:1761919 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1761919:1761919 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761919:1761919 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761919:1761919 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1761919:1761919 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1761919:1761919 [4] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test1-worker-0:1761921:1761921 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1761921:1761921 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761921:1761921 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761921:1761921 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1761921:1761921 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1761921:1761921 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1761916:1761916 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1761916:1761916 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761916:1761916 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761916:1761916 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1761916:1761916 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1761916:1761916 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1761917:1761917 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1761917:1761917 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761917:1761917 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761917:1761917 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1761917:1761917 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1761917:1761917 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1761922:1761922 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1761922:1761922 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761922:1761922 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761922:1761922 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1761922:1761922 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1761922:1761922 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1761920:1761920 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1761920:1761920 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761920:1761920 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761920:1761920 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1761920:1761920 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1761920:1761920 [5] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test1-worker-0:1761918:1761918 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1761918:1761918 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761918:1761918 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761918:1761918 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1761918:1761918 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1761918:1761918 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO ncclCommInitRank comm 0x560cd6736d80 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x2521aa680a786a17 - Init START +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO ncclCommInitRank comm 0x5583bf2c4fd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x2521aa680a786a17 - Init START +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO ncclCommInitRank comm 0x5642da38e5a0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x2521aa680a786a17 - Init START +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO ncclCommInitRank comm 0x560bcb1d0be0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x2521aa680a786a17 - Init START +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO ncclCommInitRank comm 0x55a6a4af6d80 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x2521aa680a786a17 - Init START +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO ncclCommInitRank comm 0x55c90cd5ab90 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x2521aa680a786a17 - Init START +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO ncclCommInitRank comm 0x55c997662210 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x2521aa680a786a17 - Init START +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO ncclCommInitRank comm 0x560b062538c0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x2521aa680a786a17 - Init START +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO comm 0x5583bf2c4fd0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO comm 0x55a6a4af6d80 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO comm 0x5642da38e5a0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO comm 0x560b062538c0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO comm 0x55c997662210 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO comm 0x560bcb1d0be0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO comm 0x560cd6736d80 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO comm 0x55c90cd5ab90 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 
2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 
5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 
[3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via 
P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL 
INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1761919:1763572 [4] NCCL INFO ncclCommInitRank comm 0x55c997662210 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x2521aa680a786a17 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1761921:1763570 [6] NCCL INFO ncclCommInitRank comm 0x55a6a4af6d80 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x2521aa680a786a17 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1761920:1763574 [5] NCCL INFO ncclCommInitRank comm 0x55c90cd5ab90 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x2521aa680a786a17 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1761922:1763575 [7] NCCL INFO ncclCommInitRank comm 0x560cd6736d80 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x2521aa680a786a17 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1761918:1763576 [3] NCCL INFO ncclCommInitRank comm 0x5583bf2c4fd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x2521aa680a786a17 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1761915:1763549 [0] NCCL INFO ncclCommInitRank comm 0x560bcb1d0be0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x2521aa680a786a17 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1761916:1763573 [1] NCCL INFO ncclCommInitRank comm 0x560b062538c0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x2521aa680a786a17 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1761917:1763571 [2] NCCL INFO ncclCommInitRank comm 0x5642da38e5a0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x2521aa680a786a17 - Init COMPLETE
+[2025-10-10 05:39:19,472] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 
'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 
'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 05:39:22,970] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init
language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-10 05:39:36,056 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-10 05:39:36,062 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores:
114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters 
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO comm 0x7f022006ab70 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0
+ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 
1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL 
INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL 
INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Connected all rings 
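[Editor's note: the channel lines above all walk the same ring 0 1 2 3 4 5 6 7, and the P2P connection lines mirror it: each rank sends to its right neighbor and rank 7 wraps back to rank 0 ("7[7] -> 0[0]"). A minimal sketch of that neighbor relation, with nranks read off the log; illustrative only.]

# Sketch: the ring neighbor relation behind the "x[x] -> y[y]" lines above.
nranks = 8  # one node, eight local ranks, per the log
for rank in range(nranks):
    nxt = (rank + 1) % nranks
    print(f"{rank}[{rank}] -> {nxt}[{nxt}]")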
+ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 10/0 : 4[4] -> 
3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL 
INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL 
INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1761920:1768602 [5] NCCL INFO ncclCommInitRank comm 0x7f9d9c06afc0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xcdac950e8850e4dc - Init COMPLETE +ywang29-vrdb-test1-worker-0:1761922:1768603 [7] NCCL INFO ncclCommInitRank comm 0x7f1bf406aa30 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xcdac950e8850e4dc - Init COMPLETE +ywang29-vrdb-test1-worker-0:1761918:1768601 [3] NCCL INFO ncclCommInitRank comm 0x7f022006ab70 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xcdac950e8850e4dc - Init COMPLETE +ywang29-vrdb-test1-worker-0:1761916:1768605 [1] NCCL INFO ncclCommInitRank comm 0x7fea6406ac90 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xcdac950e8850e4dc - Init COMPLETE 
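[Editor's note: once initialization completes below, every step logs a 'learning_rate' value. Those values trace a standard linear-warmup-plus-cosine-decay multiplier: they climb from 0.0625 at step 1 to 1.0 at step 16, then decay (0.999990..., 0.999961..., ...). A minimal sketch that reproduces them, assuming 16 warmup steps out of the 520 total shown by the progress bars; the function is illustrative, not the trainer's own code.]

import math

total_steps, warmup_steps = 520, 16   # 520 from the progress bar; 16 = first step at 1.0

def lr_multiplier(step: int) -> float:
    # Linear warmup to the peak, then cosine decay toward zero.
    if step < warmup_steps:
        return step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return 0.5 * (1.0 + math.cos(math.pi * progress))

for step in (1, 2, 16, 17, 18, 19):
    print(step, lr_multiplier(step))
# 1 -> 0.0625, 2 -> 0.125, 16 -> 1.0, 17 -> 0.9999902..., 18 -> 0.9999611...,
# 19 -> 0.9999125..., matching the 'learning_rate' values in the step logs below.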
+ywang29-vrdb-test1-worker-0:1761921:1768604 [6] NCCL INFO ncclCommInitRank comm 0x7f72e806b4d0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xcdac950e8850e4dc - Init COMPLETE +ywang29-vrdb-test1-worker-0:1761919:1768606 [4] NCCL INFO ncclCommInitRank comm 0x7f042406ac80 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xcdac950e8850e4dc - Init COMPLETE +ywang29-vrdb-test1-worker-0:1761915:1768600 [0] NCCL INFO ncclCommInitRank comm 0x7f0b2c06b290 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xcdac950e8850e4dc - Init COMPLETE +ywang29-vrdb-test1-worker-0:1761917:1768607 [2] NCCL INFO ncclCommInitRank comm 0x7f127c06a930 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xcdac950e8850e4dc - Init COMPLETE + 0%| | 1/520 [00:12<1:45:27, 12.19s/it] {'loss': 2.0453, 'grad_norm': 0.004835537551997467, 'learning_rate': 0.0625, 'epoch': 0.0} + 0%| | 1/520 [00:12<1:45:27, 12.19s/it] 0%| | 2/520 [00:15<1:02:12, 7.21s/it] {'loss': 2.0549, 'grad_norm': 0.005249475236266035, 'learning_rate': 0.125, 'epoch': 0.0} + 0%| | 2/520 [00:15<1:02:12, 7.21s/it] 1%| | 3/520 [00:19<48:27, 5.62s/it] {'loss': 2.1899, 'grad_norm': 0.006005513005961334, 'learning_rate': 0.1875, 'epoch': 0.01} + 1%| | 3/520 [00:19<48:27, 5.62s/it] 1%| | 4/520 [00:23<42:11, 4.91s/it] {'loss': 1.6695, 'grad_norm': 0.0014914972939722238, 'learning_rate': 0.25, 'epoch': 0.01} + 1%| | 4/520 [00:23<42:11, 4.91s/it] 1%| | 5/520 [00:27<38:45, 4.51s/it] {'loss': 1.6619, 'grad_norm': 0.0008050874348677237, 'learning_rate': 0.3125, 'epoch': 0.01} + 1%| | 5/520 [00:27<38:45, 4.51s/it] 1%| | 6/520 [00:31<36:33, 4.27s/it] {'loss': 1.3806, 'grad_norm': 0.00063876144449939, 'learning_rate': 0.375, 'epoch': 0.01} + 1%| | 6/520 [00:31<36:33, 4.27s/it] 1%|▏ | 7/520 [00:34<35:01, 4.10s/it] {'loss': 1.4168, 'grad_norm': 0.0009225322974146666, 'learning_rate': 0.4375, 'epoch': 0.01} + 1%|▏ | 7/520 [00:34<35:01, 4.10s/it] 2%|▏ | 8/520 [00:39<35:28, 4.16s/it] {'loss': 1.4576, 'grad_norm': 0.001131874986407773, 'learning_rate': 0.5, 'epoch': 0.02} + 2%|▏ | 8/520 [00:39<35:28, 4.16s/it] 2%|▏ | 9/520 [00:43<35:41, 4.19s/it] {'loss': 1.534, 'grad_norm': 0.002093250155238189, 'learning_rate': 0.5625, 'epoch': 0.02} + 2%|▏ | 9/520 [00:43<35:41, 4.19s/it] 2%|▏ | 10/520 [00:47<34:47, 4.09s/it] {'loss': 1.4043, 'grad_norm': 0.0030580153903309397, 'learning_rate': 0.625, 'epoch': 0.02} + 2%|▏ | 10/520 [00:47<34:47, 4.09s/it] 2%|▏ | 11/520 [00:51<34:04, 4.02s/it] {'loss': 1.569, 'grad_norm': 0.004082058370263655, 'learning_rate': 0.6875, 'epoch': 0.02} + 2%|▏ | 11/520 [00:51<34:04, 4.02s/it] 2%|▏ | 12/520 [00:54<33:12, 3.92s/it] {'loss': 1.6118, 'grad_norm': 0.00673348350238654, 'learning_rate': 0.75, 'epoch': 0.02} + 2%|▏ | 12/520 [00:54<33:12, 3.92s/it][2025-10-10 05:40:39,592] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [00:59<34:19, 4.06s/it] {'loss': 2.1658, 'grad_norm': 0.026021564176843946, 'learning_rate': 0.8125, 'epoch': 0.03} + 2%|▎ | 13/520 [00:59<34:19, 4.06s/it] 3%|▎ | 14/520 [01:02<33:30, 3.97s/it] {'loss': 2.7028, 'grad_norm': 0.04189714435465475, 'learning_rate': 0.875, 'epoch': 0.03} + 3%|▎ | 14/520 [01:02<33:30, 3.97s/it] 3%|▎ | 15/520 [01:06<33:21, 3.96s/it] {'loss': 2.524, 'grad_norm': 0.04959478835321647, 'learning_rate': 0.9375, 'epoch': 0.03} + 3%|▎ | 15/520 [01:06<33:21, 3.96s/it] 3%|▎ | 16/520 [01:10<32:44, 3.90s/it] {'loss': 9.6642, 'grad_norm': 1.0737543125476006, 'learning_rate': 1.0, 'epoch': 0.03} + 3%|▎ | 16/520 [01:10<32:44, 3.90s/it] 3%|▎ | 17/520 [01:14<32:15, 3.85s/it] {'loss': 4.5529, 'grad_norm': 2.2306390893458574, 'learning_rate': 0.999990286465769, 'epoch': 0.03} + 3%|▎ | 17/520 [01:14<32:15, 3.85s/it] 3%|▎ | 18/520 [01:18<31:56, 3.82s/it] {'loss': 3.7547, 'grad_norm': 0.1506636376983561, 'learning_rate': 0.9999611462404874, 'epoch': 0.03} + 3%|▎ | 18/520 [01:18<31:56, 3.82s/it] 4%|▎ | 19/520 [01:21<31:36, 3.79s/it] {'loss': 6.2508, 'grad_norm': 0.20327944502724202, 'learning_rate': 0.9999125804563732, 'epoch': 0.04} + 4%|▎ | 19/520 [01:21<31:36, 3.79s/it] 4%|▍ | 20/520 [01:25<31:26, 3.77s/it] {'loss': 5.5821, 'grad_norm': 0.44397552219639924, 'learning_rate': 0.9998445910004081, 'epoch': 0.04} + 4%|▍ | 20/520 [01:25<31:26, 3.77s/it] 4%|▍ | 21/520 [01:29<31:17, 3.76s/it] {'loss': 6.796, 'grad_norm': 0.10351866573314145, 'learning_rate': 0.9997571805142638, 'epoch': 0.04} + 4%|▍ | 21/520 [01:29<31:17, 3.76s/it] 4%|▍ | 22/520 [01:33<31:07, 3.75s/it] {'loss': 4.2706, 'grad_norm': 0.03302696131089083, 'learning_rate': 0.9996503523941993, 'epoch': 0.04} + 4%|▍ | 22/520 [01:33<31:07, 3.75s/it] 4%|▍ | 23/520 [01:36<31:08, 3.76s/it] {'loss': 3.4725, 'grad_norm': 0.018511140120145052, 'learning_rate': 0.999524110790929, 'epoch': 0.04} + 4%|▍ | 23/520 [01:36<31:08, 3.76s/it] 5%|▍ | 24/520 [01:40<30:57, 3.74s/it] {'loss': 5.0689, 'grad_norm': 0.04570437427493692, 'learning_rate': 0.9993784606094611, 'epoch': 0.05} + 5%|▍ | 24/520 [01:40<30:57, 3.74s/it] 5%|▍ | 25/520 [01:44<30:52, 3.74s/it] {'loss': 3.1326, 'grad_norm': 0.010978569544290636, 'learning_rate': 0.9992134075089083, 'epoch': 0.05} + 5%|▍ | 25/520 [01:44<30:52, 3.74s/it] 5%|▌ | 26/520 [01:47<30:48, 3.74s/it] {'loss': 2.9896, 'grad_norm': 0.007717485745105362, 'learning_rate': 0.999028957902266, 'epoch': 0.05} + 5%|▌ | 26/520 [01:47<30:48, 3.74s/it] 5%|▌ | 27/520 [01:51<30:45, 3.74s/it] {'loss': 2.6482, 'grad_norm': 0.00609984312429171, 'learning_rate': 0.9988251189561644, 'epoch': 0.05} + 5%|▌ | 27/520 [01:51<30:45, 3.74s/it] 5%|▌ | 28/520 [01:55<30:38, 3.74s/it] {'loss': 2.4725, 'grad_norm': 0.003550498037044286, 'learning_rate': 0.99860189859059, 'epoch': 0.05} + 5%|▌ | 28/520 [01:55<30:38, 3.74s/it] 6%|▌ | 29/520 [01:59<30:36, 3.74s/it] {'loss': 2.4235, 'grad_norm': 0.0041495754867900475, 'learning_rate': 0.9983593054785775, 'epoch': 0.06} + 6%|▌ | 29/520 [01:59<30:36, 3.74s/it] 6%|▌ | 30/520 [02:02<30:28, 3.73s/it] {'loss': 3.7164, 'grad_norm': 0.009561353593824633, 'learning_rate': 0.9980973490458728, 'epoch': 0.06} + 6%|▌ | 30/520 [02:02<30:28, 3.73s/it] 6%|▌ | 31/520 [02:06<30:28, 3.74s/it] {'loss': 2.4158, 'grad_norm': 0.002824447218960478, 'learning_rate': 0.9978160394705669, 'epoch': 0.06} + 6%|▌ | 31/520 
[02:06<30:28, 3.74s/it] 6%|▌ | 32/520 [02:10<30:16, 3.72s/it] {'loss': 4.5063, 'grad_norm': 0.012391987605139218, 'learning_rate': 0.9975153876827008, 'epoch': 0.06} + 6%|▌ | 32/520 [02:10<30:16, 3.72s/it] 6%|▋ | 33/520 [02:14<30:16, 3.73s/it] {'loss': 2.2876, 'grad_norm': 0.0033690592302126976, 'learning_rate': 0.9971954053638399, 'epoch': 0.06} + 6%|▋ | 33/520 [02:14<30:16, 3.73s/it] 7%|▋ | 34/520 [02:17<30:08, 3.72s/it] {'loss': 2.2224, 'grad_norm': 0.0033477103754058725, 'learning_rate': 0.9968561049466214, 'epoch': 0.07} + 7%|▋ | 34/520 [02:17<30:08, 3.72s/it] 7%|▋ | 35/520 [02:21<29:59, 3.71s/it] {'loss': 2.2343, 'grad_norm': 0.003376057949965158, 'learning_rate': 0.9964974996142697, 'epoch': 0.07} + 7%|▋ | 35/520 [02:21<29:59, 3.71s/it] 7%|▋ | 36/520 [02:25<29:52, 3.70s/it] {'loss': 2.3697, 'grad_norm': 0.00267639145803174, 'learning_rate': 0.9961196033000861, 'epoch': 0.07} + 7%|▋ | 36/520 [02:25<29:52, 3.70s/it] 7%|▋ | 37/520 [02:28<29:46, 3.70s/it] {'loss': 2.9891, 'grad_norm': 0.01049056854195849, 'learning_rate': 0.9957224306869052, 'epoch': 0.07} + 7%|▋ | 37/520 [02:28<29:46, 3.70s/it] 7%|▋ | 38/520 [02:32<29:37, 3.69s/it] {'loss': 2.4022, 'grad_norm': 0.0023276506351146086, 'learning_rate': 0.9953059972065263, 'epoch': 0.07} + 7%|▋ | 38/520 [02:32<29:37, 3.69s/it] 8%|▊ | 39/520 [02:36<29:37, 3.70s/it] {'loss': 2.2173, 'grad_norm': 0.0032635582557704293, 'learning_rate': 0.994870319039113, 'epoch': 0.07} + 8%|▊ | 39/520 [02:36<29:37, 3.70s/it] 8%|▊ | 40/520 [02:39<29:28, 3.68s/it] {'loss': 2.1659, 'grad_norm': 0.001729581887283842, 'learning_rate': 0.9944154131125642, 'epoch': 0.08} + 8%|▊ | 40/520 [02:39<29:28, 3.68s/it] 8%|▊ | 41/520 [02:43<29:28, 3.69s/it] {'loss': 2.1556, 'grad_norm': 0.002001546262279882, 'learning_rate': 0.9939412971018573, 'epoch': 0.08} + 8%|▊ | 41/520 [02:43<29:28, 3.69s/it] 8%|▊ | 42/520 [02:47<29:30, 3.70s/it] {'loss': 2.2515, 'grad_norm': 0.0021981858511840834, 'learning_rate': 0.9934479894283605, 'epoch': 0.08} + 8%|▊ | 42/520 [02:47<29:30, 3.70s/it] 8%|▊ | 43/520 [02:51<29:26, 3.70s/it] {'loss': 2.7772, 'grad_norm': 0.004528349813887273, 'learning_rate': 0.9929355092591179, 'epoch': 0.08} + 8%|▊ | 43/520 [02:51<29:26, 3.70s/it] 8%|▊ | 44/520 [02:54<29:21, 3.70s/it] {'loss': 2.8933, 'grad_norm': 0.0044449990886970695, 'learning_rate': 0.9924038765061041, 'epoch': 0.08} + 8%|▊ | 44/520 [02:54<29:21, 3.70s/it] 9%|▊ | 45/520 [02:58<29:16, 3.70s/it] {'loss': 2.1091, 'grad_norm': 0.002180948732199244, 'learning_rate': 0.9918531118254507, 'epoch': 0.09} + 9%|▊ | 45/520 [02:58<29:16, 3.70s/it] 9%|▉ | 46/520 [03:02<29:14, 3.70s/it] {'loss': 2.8606, 'grad_norm': 0.005440242831437211, 'learning_rate': 0.9912832366166442, 'epoch': 0.09} + 9%|▉ | 46/520 [03:02<29:14, 3.70s/it] 9%|▉ | 47/520 [03:05<29:19, 3.72s/it] {'loss': 2.13, 'grad_norm': 0.0015259660088569912, 'learning_rate': 0.9906942730216939, 'epoch': 0.09} + 9%|▉ | 47/520 [03:05<29:19, 3.72s/it] 9%|▉ | 48/520 [03:09<29:12, 3.71s/it] {'loss': 2.084, 'grad_norm': 0.0018061571148933354, 'learning_rate': 0.9900862439242719, 'epoch': 0.09} + 9%|▉ | 48/520 [03:09<29:12, 3.71s/it] 9%|▉ | 49/520 [03:13<29:04, 3.70s/it] {'loss': 2.0541, 'grad_norm': 0.0015274404475868364, 'learning_rate': 0.9894591729488242, 'epoch': 0.09} + 9%|▉ | 49/520 [03:13<29:04, 3.70s/it] 10%|▉ | 50/520 [03:16<28:57, 3.70s/it] {'loss': 2.0497, 'grad_norm': 0.001435832248644795, 'learning_rate': 0.9888130844596523, 'epoch': 0.1} + 10%|▉ | 50/520 [03:16<28:57, 3.70s/it] 10%|▉ | 51/520 [03:20<28:46, 3.68s/it] {'loss': 1.9186, 'grad_norm': 
0.0016424787063891997, 'learning_rate': 0.9881480035599667, 'epoch': 0.1} + 10%|▉ | 51/520 [03:20<28:46, 3.68s/it] 10%|█ | 52/520 [03:24<28:49, 3.69s/it] {'loss': 2.1049, 'grad_norm': 0.001675257452357229, 'learning_rate': 0.9874639560909118, 'epoch': 0.1} + 10%|█ | 52/520 [03:24<28:49, 3.69s/it] 10%|█ | 53/520 [03:28<28:46, 3.70s/it] {'loss': 2.1113, 'grad_norm': 0.0013170742711798426, 'learning_rate': 0.9867609686305616, 'epoch': 0.1} + 10%|█ | 53/520 [03:28<28:46, 3.70s/it] 10%|█ | 54/520 [03:31<28:44, 3.70s/it] {'loss': 1.8971, 'grad_norm': 0.0012221971457935011, 'learning_rate': 0.9860390684928872, 'epoch': 0.1} + 10%|█ | 54/520 [03:31<28:44, 3.70s/it] 11%|█ | 55/520 [03:35<28:45, 3.71s/it] {'loss': 1.9348, 'grad_norm': 0.0014819610071084319, 'learning_rate': 0.9852982837266955, 'epoch': 0.11} + 11%|█ | 55/520 [03:35<28:45, 3.71s/it] 11%|█ | 56/520 [03:39<28:42, 3.71s/it] {'loss': 2.0732, 'grad_norm': 0.0013262667614647643, 'learning_rate': 0.984538643114539, 'epoch': 0.11} + 11%|█ | 56/520 [03:39<28:42, 3.71s/it] 11%|█ | 57/520 [03:42<28:44, 3.72s/it] {'loss': 1.9133, 'grad_norm': 0.0013241322835404578, 'learning_rate': 0.9837601761715982, 'epoch': 0.11} + 11%|█ | 57/520 [03:42<28:44, 3.72s/it] 11%|█ | 58/520 [03:46<28:59, 3.77s/it] {'loss': 2.051, 'grad_norm': 0.0010989956850971938, 'learning_rate': 0.9829629131445341, 'epoch': 0.11} + 11%|█ | 58/520 [03:46<28:59, 3.77s/it] 11%|█▏ | 59/520 [03:50<29:02, 3.78s/it] {'loss': 2.3836, 'grad_norm': 0.0023985930913588607, 'learning_rate': 0.9821468850103139, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:50<29:02, 3.78s/it] 12%|█▏ | 60/520 [03:54<29:13, 3.81s/it] {'loss': 2.0038, 'grad_norm': 0.0012198915785750592, 'learning_rate': 0.981312123475006, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:54<29:13, 3.81s/it] 12%|█▏ | 61/520 [03:58<29:05, 3.80s/it] {'loss': 2.5175, 'grad_norm': 0.0026385146839231956, 'learning_rate': 0.9804586609725499, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:58<29:05, 3.80s/it] 12%|█▏ | 62/520 [04:01<28:45, 3.77s/it] {'loss': 1.9228, 'grad_norm': 0.0011864577037809505, 'learning_rate': 0.9795865306634939, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:01<28:45, 3.77s/it] 12%|█▏ | 63/520 [04:05<28:33, 3.75s/it] {'loss': 1.9867, 'grad_norm': 0.0011824261932605671, 'learning_rate': 0.978695766433709, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:05<28:33, 3.75s/it] 12%|█▏ | 64/520 [04:09<28:36, 3.76s/it] {'loss': 1.9764, 'grad_norm': 0.0014224426034095611, 'learning_rate': 0.9777864028930704, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:09<28:36, 3.76s/it] 12%|█▎ | 65/520 [04:13<28:43, 3.79s/it] {'loss': 2.0051, 'grad_norm': 0.0013997574686842973, 'learning_rate': 0.9768584753741134, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:13<28:43, 3.79s/it] 13%|█▎ | 66/520 [04:17<28:29, 3.77s/it] {'loss': 1.9561, 'grad_norm': 0.0011056281373932478, 'learning_rate': 0.9759120199306612, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:17<28:29, 3.77s/it] 13%|█▎ | 67/520 [04:20<28:26, 3.77s/it] {'loss': 1.7503, 'grad_norm': 0.0009902769929746858, 'learning_rate': 0.9749470733364229, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:20<28:26, 3.77s/it] 13%|█▎ | 68/520 [04:24<28:08, 3.74s/it] {'loss': 1.7884, 'grad_norm': 0.001076503645541333, 'learning_rate': 0.9739636730835659, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:24<28:08, 3.74s/it] 13%|█▎ | 69/520 [04:28<28:00, 3.73s/it] {'loss': 1.7854, 'grad_norm': 0.001180005485810773, 'learning_rate': 0.972961857381258, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:28<28:00, 3.73s/it] 13%|█▎ | 70/520 [04:31<28:04, 3.74s/it] {'loss': 1.8699, 'grad_norm': 0.0011690531130407486, 
'learning_rate': 0.9719416651541838, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:31<28:04, 3.74s/it] 14%|█▎ | 71/520 [04:35<27:56, 3.73s/it] {'loss': 1.7394, 'grad_norm': 0.0011448618129773944, 'learning_rate': 0.9709031360410317, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:35<27:56, 3.73s/it] 14%|█▍ | 72/520 [04:39<27:45, 3.72s/it] {'loss': 1.9158, 'grad_norm': 0.0012912107912877403, 'learning_rate': 0.9698463103929542, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:39<27:45, 3.72s/it] 14%|█▍ | 73/520 [04:43<27:47, 3.73s/it] {'loss': 1.7086, 'grad_norm': 0.0009861557779005377, 'learning_rate': 0.9687712292719997, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:43<27:47, 3.73s/it] 14%|█▍ | 74/520 [04:46<27:39, 3.72s/it] {'loss': 1.88, 'grad_norm': 0.001455926988329665, 'learning_rate': 0.967677934449517, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:46<27:39, 3.72s/it] 14%|█▍ | 75/520 [04:50<27:26, 3.70s/it] {'loss': 1.7213, 'grad_norm': 0.0009053548276610441, 'learning_rate': 0.9665664684045332, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:50<27:26, 3.70s/it] 15%|█▍ | 76/520 [04:54<27:35, 3.73s/it] {'loss': 2.4268, 'grad_norm': 0.0019049322241631926, 'learning_rate': 0.9654368743221021, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:54<27:35, 3.73s/it] 15%|█▍ | 77/520 [04:57<27:30, 3.73s/it] {'loss': 1.6918, 'grad_norm': 0.0017770508429575636, 'learning_rate': 0.9642891960916268, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:57<27:30, 3.73s/it] 15%|█▌ | 78/520 [05:01<27:20, 3.71s/it] {'loss': 1.8086, 'grad_norm': 0.001463081377410731, 'learning_rate': 0.9631234783051543, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:01<27:20, 3.71s/it] 15%|█▌ | 79/520 [05:05<27:25, 3.73s/it] {'loss': 1.7752, 'grad_norm': 0.0011369561820543618, 'learning_rate': 0.9619397662556434, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:05<27:25, 3.73s/it] 15%|█▌ | 80/520 [05:09<27:20, 3.73s/it] {'loss': 2.4643, 'grad_norm': 0.0019435406241693342, 'learning_rate': 0.9607381059352038, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:09<27:20, 3.73s/it] 16%|█▌ | 81/520 [05:12<27:13, 3.72s/it] {'loss': 2.0155, 'grad_norm': 0.001303347555475902, 'learning_rate': 0.9595185440333103, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:12<27:13, 3.72s/it] 16%|█▌ | 82/520 [05:16<27:04, 3.71s/it] {'loss': 1.8859, 'grad_norm': 0.0015083123736273315, 'learning_rate': 0.9582811279349881, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:16<27:04, 3.71s/it] 16%|█▌ | 83/520 [05:20<26:58, 3.70s/it] {'loss': 1.9125, 'grad_norm': 0.0011130978965730096, 'learning_rate': 0.9570259057189716, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:20<26:58, 3.70s/it] 16%|█▌ | 84/520 [05:23<26:51, 3.70s/it] {'loss': 1.8766, 'grad_norm': 0.001417261519092897, 'learning_rate': 0.9557529261558366, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:23<26:51, 3.70s/it] 16%|█▋ | 85/520 [05:27<26:52, 3.71s/it] {'loss': 1.8786, 'grad_norm': 0.0011015527573916746, 'learning_rate': 0.9544622387061055, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:27<26:52, 3.71s/it] 17%|█▋ | 86/520 [05:31<26:42, 3.69s/it] {'loss': 1.9783, 'grad_norm': 0.0010938850799367995, 'learning_rate': 0.953153893518325, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:31<26:42, 3.69s/it] 17%|█▋ | 87/520 [05:34<26:40, 3.70s/it] {'loss': 2.373, 'grad_norm': 0.0021499649318719996, 'learning_rate': 0.9518279414271184, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:34<26:40, 3.70s/it] 17%|█▋ | 88/520 [05:38<26:37, 3.70s/it] {'loss': 2.6184, 'grad_norm': 0.0022587325495340635, 'learning_rate': 0.9504844339512095, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:38<26:37, 3.70s/it] 17%|█▋ | 89/520 [05:42<26:32, 3.70s/it] {'loss': 1.8487, 'grad_norm': 0.0009555142167415004, 
'learning_rate': 0.9491234232914221, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:42<26:32, 3.70s/it] 17%|█▋ | 90/520 [05:46<26:35, 3.71s/it] {'loss': 1.7612, 'grad_norm': 0.0012075721558613415, 'learning_rate': 0.9477449623286505, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:46<26:35, 3.71s/it] 18%|█▊ | 91/520 [05:49<26:35, 3.72s/it] {'loss': 1.8598, 'grad_norm': 0.000901842030460327, 'learning_rate': 0.9463491046218058, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:49<26:35, 3.72s/it] 18%|█▊ | 92/520 [05:53<26:25, 3.70s/it] {'loss': 1.7876, 'grad_norm': 0.0011467196779505438, 'learning_rate': 0.9449359044057344, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:53<26:25, 3.70s/it] 18%|█▊ | 93/520 [05:57<26:24, 3.71s/it] {'loss': 1.7729, 'grad_norm': 0.017370479102267515, 'learning_rate': 0.9435054165891108, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:57<26:24, 3.71s/it] 18%|█▊ | 94/520 [06:00<26:15, 3.70s/it] {'loss': 1.9283, 'grad_norm': 0.0011591569911749357, 'learning_rate': 0.9420576967523049, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:00<26:15, 3.70s/it] 18%|█▊ | 95/520 [06:04<26:12, 3.70s/it] {'loss': 1.7545, 'grad_norm': 0.001575232768637315, 'learning_rate': 0.9405928011452211, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:04<26:12, 3.70s/it] 18%|█▊ | 96/520 [06:08<26:06, 3.69s/it] {'loss': 1.7555, 'grad_norm': 0.002244741622314421, 'learning_rate': 0.9391107866851143, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:08<26:06, 3.69s/it] 19%|█▊ | 97/520 [06:11<25:56, 3.68s/it] {'loss': 1.7465, 'grad_norm': 0.0014847436213539273, 'learning_rate': 0.9376117109543769, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:11<25:56, 3.68s/it] 19%|█▉ | 98/520 [06:15<25:50, 3.67s/it] {'loss': 1.7515, 'grad_norm': 0.0010848690787713066, 'learning_rate': 0.9360956321983027, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:15<25:50, 3.67s/it] 19%|█▉ | 99/520 [06:19<25:48, 3.68s/it] {'loss': 1.7642, 'grad_norm': 0.0009035590305737296, 'learning_rate': 0.9345626093228232, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:19<25:48, 3.68s/it] 19%|█▉ | 100/520 [06:22<25:43, 3.68s/it] {'loss': 2.1481, 'grad_norm': 0.0018886696165509073, 'learning_rate': 0.9330127018922194, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:22<25:43, 3.68s/it] 19%|█▉ | 101/520 [06:26<25:38, 3.67s/it] {'loss': 1.7728, 'grad_norm': 0.007412986337859689, 'learning_rate': 0.9314459701268065, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:26<25:38, 3.67s/it] 20%|█▉ | 102/520 [06:30<25:32, 3.67s/it] {'loss': 1.7498, 'grad_norm': 0.0011465554517796597, 'learning_rate': 0.9298624749005951, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:30<25:32, 3.67s/it] 20%|█▉ | 103/520 [06:34<25:37, 3.69s/it] {'loss': 1.7057, 'grad_norm': 0.0012555029127576584, 'learning_rate': 0.9282622777389258, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:34<25:37, 3.69s/it] 20%|██ | 104/520 [06:37<25:32, 3.68s/it] {'loss': 1.7813, 'grad_norm': 0.0010050775866357819, 'learning_rate': 0.9266454408160778, 'epoch': 0.2} + 20%|██ | 104/520 [06:37<25:32, 3.68s/it] 20%|██ | 105/520 [06:41<25:23, 3.67s/it] {'loss': 1.7593, 'grad_norm': 0.000921743374823032, 'learning_rate': 0.9250120269528546, 'epoch': 0.2} + 20%|██ | 105/520 [06:41<25:23, 3.67s/it] 20%|██ | 106/520 [06:45<25:35, 3.71s/it] {'loss': 2.1542, 'grad_norm': 0.0011211075661431529, 'learning_rate': 0.9233620996141421, 'epoch': 0.2} + 20%|██ | 106/520 [06:45<25:35, 3.71s/it] 21%|██ | 107/520 [06:48<25:27, 3.70s/it] {'loss': 2.1562, 'grad_norm': 0.0020739279890571597, 'learning_rate': 0.9216957229064429, 'epoch': 0.21} + 21%|██ | 107/520 [06:48<25:27, 3.70s/it] 21%|██ | 108/520 [06:52<25:20, 3.69s/it] {'loss': 1.7104, 'grad_norm': 
0.0017265892293343266, 'learning_rate': 0.9200129615753858, 'epoch': 0.21} + 21%|██ | 108/520 [06:52<25:20, 3.69s/it] 21%|██ | 109/520 [06:56<25:16, 3.69s/it] {'loss': 2.1361, 'grad_norm': 0.0014685685969660217, 'learning_rate': 0.9183138810032099, 'epoch': 0.21} + 21%|██ | 109/520 [06:56<25:16, 3.69s/it] 21%|██ | 110/520 [06:59<25:06, 3.67s/it] {'loss': 1.923, 'grad_norm': 0.0012824651648839752, 'learning_rate': 0.9165985472062245, 'epoch': 0.21} + 21%|██ | 110/520 [06:59<25:06, 3.67s/it] 21%|██▏ | 111/520 [07:03<25:02, 3.67s/it] {'loss': 1.9629, 'grad_norm': 0.0009147687374452316, 'learning_rate': 0.9148670268322437, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:03<25:02, 3.67s/it] 22%|██▏ | 112/520 [07:07<24:58, 3.67s/it] {'loss': 1.8221, 'grad_norm': 0.000961306050635509, 'learning_rate': 0.9131193871579975, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:07<24:58, 3.67s/it] 22%|██▏ | 113/520 [07:10<24:50, 3.66s/it] {'loss': 1.6516, 'grad_norm': 0.0008836167807848348, 'learning_rate': 0.9113556960865167, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:10<24:50, 3.66s/it] 22%|██▏ | 114/520 [07:14<24:50, 3.67s/it] {'loss': 1.787, 'grad_norm': 0.0011972149223164286, 'learning_rate': 0.909576022144496, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:14<24:50, 3.67s/it] 22%|██▏ | 115/520 [07:18<24:49, 3.68s/it] {'loss': 1.9425, 'grad_norm': 0.0009453145354376448, 'learning_rate': 0.9077804344796301, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:18<24:49, 3.68s/it] 22%|██▏ | 116/520 [07:21<24:40, 3.67s/it] {'loss': 1.8742, 'grad_norm': 0.0009092400643124921, 'learning_rate': 0.9059690028579284, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:21<24:40, 3.67s/it] 22%|██▎ | 117/520 [07:25<24:38, 3.67s/it] {'loss': 1.8788, 'grad_norm': 0.0020402445191792995, 'learning_rate': 0.9041417976610027, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:25<24:38, 3.67s/it] 23%|██▎ | 118/520 [07:29<24:32, 3.66s/it] {'loss': 1.7268, 'grad_norm': 0.001139259432347087, 'learning_rate': 0.9022988898833342, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:29<24:32, 3.66s/it] 23%|██▎ | 119/520 [07:32<24:24, 3.65s/it] {'loss': 1.6723, 'grad_norm': 0.0008774212096963476, 'learning_rate': 0.900440351129514, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:32<24:24, 3.65s/it] 23%|██▎ | 120/520 [07:36<24:20, 3.65s/it] {'loss': 1.7065, 'grad_norm': 0.0016900893662366216, 'learning_rate': 0.8985662536114613, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:36<24:20, 3.65s/it] 23%|██▎ | 121/520 [07:40<24:17, 3.65s/it] {'loss': 1.761, 'grad_norm': 0.000973483479217235, 'learning_rate': 0.8966766701456176, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:40<24:17, 3.65s/it] 23%|██▎ | 122/520 [07:43<24:18, 3.66s/it] {'loss': 1.6239, 'grad_norm': 0.001074899018736689, 'learning_rate': 0.8947716741501177, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:43<24:18, 3.66s/it] 24%|██▎ | 123/520 [07:47<24:17, 3.67s/it] {'loss': 2.2232, 'grad_norm': 0.0018627813824668546, 'learning_rate': 0.8928513396419369, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:47<24:17, 3.67s/it] 24%|██▍ | 124/520 [07:51<24:17, 3.68s/it] {'loss': 1.7553, 'grad_norm': 0.0011791081449694062, 'learning_rate': 0.890915741234015, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:51<24:17, 3.68s/it] 24%|██▍ | 125/520 [07:55<24:33, 3.73s/it] {'loss': 1.7106, 'grad_norm': 0.0010988578980630347, 'learning_rate': 0.8889649541323574, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:55<24:33, 3.73s/it] 24%|██▍ | 126/520 [07:59<26:04, 3.97s/it] {'loss': 2.0163, 'grad_norm': 0.0014725429088044918, 'learning_rate': 0.8869990541331138, 'epoch': 0.24} + 24%|██▍ | 126/520 [07:59<26:04, 3.97s/it] 24%|██▍ | 
127/520 [08:03<25:55, 3.96s/it] {'loss': 1.7025, 'grad_norm': 0.001567456133219085, 'learning_rate': 0.8850181176196315, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:03<25:55, 3.96s/it] 25%|██▍ | 128/520 [08:07<25:39, 3.93s/it] {'loss': 1.7676, 'grad_norm': 0.0015556686415311642, 'learning_rate': 0.883022221559489, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:07<25:39, 3.93s/it] 25%|██▍ | 129/520 [08:11<25:28, 3.91s/it] {'loss': 1.6523, 'grad_norm': 0.0010460544624955469, 'learning_rate': 0.8810114435015054, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:11<25:28, 3.91s/it] 25%|██▌ | 130/520 [08:15<25:19, 3.90s/it] {'loss': 1.7437, 'grad_norm': 0.0013338807050245107, 'learning_rate': 0.8789858615727265, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:15<25:19, 3.90s/it] 25%|██▌ | 131/520 [08:18<25:10, 3.88s/it] {'loss': 2.021, 'grad_norm': 0.002131992172952577, 'learning_rate': 0.8769455544753899, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:18<25:10, 3.88s/it] 25%|██▌ | 132/520 [08:22<24:59, 3.87s/it] {'loss': 1.7852, 'grad_norm': 0.0008118628080885545, 'learning_rate': 0.8748906014838671, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:22<24:59, 3.87s/it] 26%|██▌ | 133/520 [08:26<24:57, 3.87s/it] {'loss': 1.7026, 'grad_norm': 0.001301861458760204, 'learning_rate': 0.8728210824415827, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:26<24:57, 3.87s/it] 26%|██▌ | 134/520 [08:30<24:47, 3.85s/it] {'loss': 1.7754, 'grad_norm': 0.0007689248743569898, 'learning_rate': 0.8707370777579133, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:30<24:47, 3.85s/it] 26%|██▌ | 135/520 [08:34<24:47, 3.86s/it] {'loss': 1.8707, 'grad_norm': 0.0012937118720914625, 'learning_rate': 0.868638668405062, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:34<24:47, 3.86s/it] 26%|██▌ | 136/520 [08:38<24:42, 3.86s/it] {'loss': 1.7658, 'grad_norm': 0.0008744142253696396, 'learning_rate': 0.8665259359149131, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:38<24:42, 3.86s/it] 26%|██▋ | 137/520 [08:42<24:39, 3.86s/it] {'loss': 1.726, 'grad_norm': 0.0025477991147283305, 'learning_rate': 0.8643989623758642, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:42<24:39, 3.86s/it] 27%|██▋ | 138/520 [08:45<24:33, 3.86s/it] {'loss': 1.6845, 'grad_norm': 0.0007469664626923284, 'learning_rate': 0.8622578304296363, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:45<24:33, 3.86s/it] 27%|██▋ | 139/520 [08:49<24:33, 3.87s/it] {'loss': 1.9194, 'grad_norm': 0.001713701617625232, 'learning_rate': 0.8601026232680633, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:49<24:33, 3.87s/it] 27%|██▋ | 140/520 [08:53<24:26, 3.86s/it] {'loss': 2.0593, 'grad_norm': 0.0012286170602424052, 'learning_rate': 0.8579334246298592, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:53<24:26, 3.86s/it] 27%|██▋ | 141/520 [08:57<24:25, 3.87s/it] {'loss': 1.8133, 'grad_norm': 0.0009006564049758343, 'learning_rate': 0.8557503187973651, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:57<24:25, 3.87s/it] 27%|██▋ | 142/520 [09:01<24:15, 3.85s/it] {'loss': 2.1072, 'grad_norm': 0.0009235823768577814, 'learning_rate': 0.8535533905932737, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:01<24:15, 3.85s/it] 28%|██▊ | 143/520 [09:05<24:18, 3.87s/it] {'loss': 1.7513, 'grad_norm': 0.0010279008226959255, 'learning_rate': 0.8513427253773346, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:05<24:18, 3.87s/it] 28%|██▊ | 144/520 [09:08<23:59, 3.83s/it] {'loss': 1.6551, 'grad_norm': 0.0010493221670852377, 'learning_rate': 0.8491184090430364, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:08<23:59, 3.83s/it] 28%|██▊ | 145/520 [09:12<23:44, 3.80s/it] {'loss': 1.5861, 'grad_norm': 0.001124422483886582, 'learning_rate': 0.8468805280142708, 
'epoch': 0.28} + 28%|██▊ | 145/520 [09:12<23:44, 3.80s/it] 28%|██▊ | 146/520 [09:16<23:33, 3.78s/it] {'loss': 2.1616, 'grad_norm': 0.0014973111653814137, 'learning_rate': 0.8446291692419735, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:16<23:33, 3.78s/it] 28%|██▊ | 147/520 [09:20<23:27, 3.77s/it] {'loss': 1.6328, 'grad_norm': 0.0008624392591371629, 'learning_rate': 0.8423644202007468, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:20<23:27, 3.77s/it] 28%|██▊ | 148/520 [09:23<23:20, 3.76s/it] {'loss': 1.6994, 'grad_norm': 0.00082436689049975, 'learning_rate': 0.8400863688854596, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:23<23:20, 3.76s/it] 29%|██▊ | 149/520 [09:27<23:11, 3.75s/it] {'loss': 1.6512, 'grad_norm': 0.001083172291030409, 'learning_rate': 0.8377951038078302, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:27<23:11, 3.75s/it] 29%|██▉ | 150/520 [09:31<23:06, 3.75s/it] {'loss': 1.8722, 'grad_norm': 0.001225647565716589, 'learning_rate': 0.835490713992985, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:31<23:06, 3.75s/it] 29%|██▉ | 151/520 [09:35<23:12, 3.77s/it] {'loss': 1.6582, 'grad_norm': 0.0015880719475239982, 'learning_rate': 0.833173288976002, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:35<23:12, 3.77s/it] 29%|██▉ | 152/520 [09:39<23:10, 3.78s/it] {'loss': 1.6343, 'grad_norm': 0.0009461827758239073, 'learning_rate': 0.8308429187984298, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:39<23:10, 3.78s/it] 29%|██▉ | 153/520 [09:42<22:52, 3.74s/it] {'loss': 1.6645, 'grad_norm': 0.0008943339704779512, 'learning_rate': 0.8284996940047903, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:42<22:52, 3.74s/it] 30%|██▉ | 154/520 [09:46<22:41, 3.72s/it] {'loss': 1.775, 'grad_norm': 0.000856968580144399, 'learning_rate': 0.8261437056390606, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:46<22:41, 3.72s/it] 30%|██▉ | 155/520 [09:50<22:34, 3.71s/it] {'loss': 1.6496, 'grad_norm': 0.0012063582178186763, 'learning_rate': 0.8237750452411352, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:50<22:34, 3.71s/it] 30%|███ | 156/520 [09:53<22:28, 3.71s/it] {'loss': 1.716, 'grad_norm': 0.001266935675482215, 'learning_rate': 0.8213938048432696, 'epoch': 0.3} + 30%|███ | 156/520 [09:53<22:28, 3.71s/it] 30%|███ | 157/520 [09:57<22:25, 3.71s/it] {'loss': 2.1777, 'grad_norm': 0.0010612801258003068, 'learning_rate': 0.8190000769665043, 'epoch': 0.3} + 30%|███ | 157/520 [09:57<22:25, 3.71s/it] 30%|███ | 158/520 [10:01<22:18, 3.70s/it] {'loss': 1.6644, 'grad_norm': 0.0008539074335724022, 'learning_rate': 0.81659395461707, 'epoch': 0.3} + 30%|███ | 158/520 [10:01<22:18, 3.70s/it] 31%|███ | 159/520 [10:04<22:12, 3.69s/it] {'loss': 1.6833, 'grad_norm': 0.0007097959627980638, 'learning_rate': 0.8141755312827736, 'epoch': 0.31} + 31%|███ | 159/520 [10:04<22:12, 3.69s/it] 31%|███ | 160/520 [10:08<22:25, 3.74s/it] {'loss': 1.7281, 'grad_norm': 0.0007373275063376402, 'learning_rate': 0.8117449009293668, 'epoch': 0.31} + 31%|███ | 160/520 [10:08<22:25, 3.74s/it] 31%|███ | 161/520 [10:12<22:38, 3.78s/it] {'loss': 1.7273, 'grad_norm': 0.0009289417908199032, 'learning_rate': 0.8093021579968941, 'epoch': 0.31} + 31%|███ | 161/520 [10:12<22:38, 3.78s/it] 31%|███ | 162/520 [10:16<22:41, 3.80s/it] {'loss': 2.0642, 'grad_norm': 0.0016176080884794425, 'learning_rate': 0.8068473973960237, 'epoch': 0.31} + 31%|███ | 162/520 [10:16<22:41, 3.80s/it] 31%|███▏ | 163/520 [10:20<22:41, 3.81s/it] {'loss': 1.6276, 'grad_norm': 0.0012539144703408473, 'learning_rate': 0.8043807145043603, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:20<22:41, 3.81s/it] 32%|███▏ | 164/520 [10:24<22:41, 3.82s/it] {'loss': 1.5226, 'grad_norm': 
0.0015470827829827535, 'learning_rate': 0.8019022051627387, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:24<22:41, 3.82s/it] 32%|███▏ | 165/520 [10:27<22:40, 3.83s/it] {'loss': 1.6611, 'grad_norm': 0.0007883644194623919, 'learning_rate': 0.7994119656715002, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:27<22:40, 3.83s/it] 32%|███▏ | 166/520 [10:31<22:30, 3.81s/it] {'loss': 1.6784, 'grad_norm': 0.0014160627260507083, 'learning_rate': 0.7969100927867507, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:31<22:30, 3.81s/it] 32%|███▏ | 167/520 [10:35<22:28, 3.82s/it] {'loss': 1.6692, 'grad_norm': 0.0014778180252180005, 'learning_rate': 0.7943966837166023, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:35<22:28, 3.82s/it] 32%|███▏ | 168/520 [10:39<22:13, 3.79s/it] {'loss': 1.6238, 'grad_norm': 0.001214659646887025, 'learning_rate': 0.791871836117395, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:39<22:13, 3.79s/it] 32%|███▎ | 169/520 [10:42<21:57, 3.75s/it] {'loss': 1.6816, 'grad_norm': 0.0014123735486481247, 'learning_rate': 0.789335648089903, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:42<21:57, 3.75s/it] 33%|███▎ | 170/520 [10:46<21:49, 3.74s/it] {'loss': 1.9418, 'grad_norm': 0.0014827452514088466, 'learning_rate': 0.786788218175523, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:46<21:49, 3.74s/it] 33%|███▎ | 171/520 [10:50<21:41, 3.73s/it] {'loss': 1.6191, 'grad_norm': 0.0008275187734145699, 'learning_rate': 0.7842296453524462, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:50<21:41, 3.73s/it] 33%|███▎ | 172/520 [10:54<21:36, 3.72s/it] {'loss': 1.697, 'grad_norm': 0.0010737391668388105, 'learning_rate': 0.781660029031811, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:54<21:36, 3.72s/it] 33%|███▎ | 173/520 [10:57<21:37, 3.74s/it] {'loss': 1.6036, 'grad_norm': 0.0010991968004506467, 'learning_rate': 0.7790794690538421, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:57<21:37, 3.74s/it] 33%|███▎ | 174/520 [11:01<21:51, 3.79s/it] {'loss': 1.7298, 'grad_norm': 0.0009221471085793677, 'learning_rate': 0.7764880656839697, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:01<21:51, 3.79s/it] 34%|███▎ | 175/520 [11:05<22:09, 3.85s/it] {'loss': 1.5999, 'grad_norm': 0.0010168317386920526, 'learning_rate': 0.7738859196089357, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:05<22:09, 3.85s/it] 34%|███▍ | 176/520 [11:09<22:09, 3.86s/it] {'loss': 2.0828, 'grad_norm': 0.001893944158248602, 'learning_rate': 0.7712731319328797, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:09<22:09, 3.86s/it] 34%|███▍ | 177/520 [11:13<22:04, 3.86s/it] {'loss': 1.9061, 'grad_norm': 0.0012448564478604448, 'learning_rate': 0.768649804173412, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:13<22:04, 3.86s/it] 34%|███▍ | 178/520 [11:17<22:09, 3.89s/it] {'loss': 1.6595, 'grad_norm': 0.0008944847063260531, 'learning_rate': 0.7660160382576683, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:17<22:09, 3.89s/it] 34%|███▍ | 179/520 [11:21<22:05, 3.89s/it] {'loss': 1.7814, 'grad_norm': 0.0018490087131130177, 'learning_rate': 0.7633719365183503, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:21<22:05, 3.89s/it] 35%|███▍ | 180/520 [11:25<22:10, 3.91s/it] {'loss': 1.6657, 'grad_norm': 0.0016598649816631124, 'learning_rate': 0.760717601689749, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:25<22:10, 3.91s/it] 35%|███▍ | 181/520 [11:29<22:03, 3.90s/it] {'loss': 1.646, 'grad_norm': 0.0009799690710634251, 'learning_rate': 0.7580531369037533, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:29<22:03, 3.90s/it] 35%|███▌ | 182/520 [11:33<21:59, 3.90s/it] {'loss': 1.7058, 'grad_norm': 0.0009265449214964675, 'learning_rate': 0.7553786456858429, 'epoch': 0.35} + 35%|███▌ | 
182/520 [11:33<21:59, 3.90s/it] 35%|███▌ | 183/520 [11:36<21:57, 3.91s/it] {'loss': 1.6947, 'grad_norm': 0.0020407412643909284, 'learning_rate': 0.7526942319510654, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:36<21:57, 3.91s/it] 35%|███▌ | 184/520 [11:40<21:39, 3.87s/it] {'loss': 1.5951, 'grad_norm': 0.0012139113465959618, 'learning_rate': 0.75, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:40<21:39, 3.87s/it] 36%|███▌ | 185/520 [11:44<21:22, 3.83s/it] {'loss': 1.8134, 'grad_norm': 0.0007774733959082232, 'learning_rate': 0.7472960545147037, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:44<21:22, 3.83s/it] 36%|███▌ | 186/520 [11:48<21:07, 3.79s/it] {'loss': 1.5946, 'grad_norm': 0.0010552398734454667, 'learning_rate': 0.7445825005546447, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:48<21:07, 3.79s/it] 36%|███▌ | 187/520 [11:51<20:58, 3.78s/it] {'loss': 1.638, 'grad_norm': 0.0008454324323697925, 'learning_rate': 0.7418594435526199, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:51<20:58, 3.78s/it] 36%|███▌ | 188/520 [11:55<20:50, 3.77s/it] {'loss': 1.6928, 'grad_norm': 0.0011991142222128027, 'learning_rate': 0.7391269893106591, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:55<20:50, 3.77s/it] 36%|███▋ | 189/520 [11:59<20:40, 3.75s/it] {'loss': 1.7491, 'grad_norm': 0.0013735032550663336, 'learning_rate': 0.7363852439959135, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:59<20:40, 3.75s/it] 37%|███▋ | 190/520 [12:03<20:28, 3.72s/it] {'loss': 1.6241, 'grad_norm': 0.0011859959479877695, 'learning_rate': 0.733634314136531, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:03<20:28, 3.72s/it] 37%|███▋ | 191/520 [12:06<20:22, 3.71s/it] {'loss': 1.584, 'grad_norm': 0.0008548379141048204, 'learning_rate': 0.7308743066175171, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:06<20:22, 3.71s/it] 37%|███▋ | 192/520 [12:10<20:20, 3.72s/it] {'loss': 1.7003, 'grad_norm': 0.0008949665339781738, 'learning_rate': 0.7281053286765815, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:10<20:20, 3.72s/it] 37%|███▋ | 193/520 [12:14<20:15, 3.72s/it] {'loss': 2.0247, 'grad_norm': 0.001753563912725415, 'learning_rate': 0.7253274878999727, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:14<20:15, 3.72s/it] 37%|███▋ | 194/520 [12:17<20:11, 3.72s/it] {'loss': 1.8589, 'grad_norm': 0.002234700479310925, 'learning_rate': 0.7225408922182961, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:17<20:11, 3.72s/it] 38%|███▊ | 195/520 [12:21<20:05, 3.71s/it] {'loss': 1.6788, 'grad_norm': 0.000978732253831131, 'learning_rate': 0.7197456499023225, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:21<20:05, 3.71s/it] 38%|███▊ | 196/520 [12:25<20:08, 3.73s/it] {'loss': 1.6832, 'grad_norm': 0.001458510136284706, 'learning_rate': 0.716941869558779, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:25<20:08, 3.73s/it] 38%|███▊ | 197/520 [12:29<20:15, 3.76s/it] {'loss': 1.6298, 'grad_norm': 0.0009176714238151189, 'learning_rate': 0.7141296601261313, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:29<20:15, 3.76s/it] 38%|███▊ | 198/520 [12:32<20:03, 3.74s/it] {'loss': 1.7347, 'grad_norm': 0.0008557162180198911, 'learning_rate': 0.7113091308703497, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:32<20:03, 3.74s/it] 38%|███▊ | 199/520 [12:36<19:58, 3.73s/it] {'loss': 1.6192, 'grad_norm': 0.000780505268695417, 'learning_rate': 0.7084803913806641, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:36<19:58, 3.73s/it] 38%|███▊ | 200/520 [12:40<19:51, 3.72s/it] {'loss': 1.9128, 'grad_norm': 0.001077938039963779, 'learning_rate': 0.7056435515653059, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:40<19:51, 3.72s/it] 39%|███▊ | 201/520 [12:44<19:56, 3.75s/it] {'loss': 1.8813, 'grad_norm': 
0.0011561643929356514, 'learning_rate': 0.7027987216472376, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:44<19:56, 3.75s/it] 39%|███▉ | 202/520 [12:47<19:56, 3.76s/it] {'loss': 1.6127, 'grad_norm': 0.0009212400650684418, 'learning_rate': 0.6999460121598704, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:47<19:56, 3.76s/it] 39%|███▉ | 203/520 [12:51<19:46, 3.74s/it] {'loss': 1.6561, 'grad_norm': 0.0009399234090174329, 'learning_rate': 0.6970855339427697, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:51<19:46, 3.74s/it] 39%|███▉ | 204/520 [12:55<19:44, 3.75s/it] {'loss': 1.7521, 'grad_norm': 0.0009230045596658133, 'learning_rate': 0.6942173981373474, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:55<19:44, 3.75s/it] 39%|███▉ | 205/520 [12:59<19:40, 3.75s/it] {'loss': 1.9551, 'grad_norm': 0.0012722342757904347, 'learning_rate': 0.6913417161825449, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:59<19:40, 3.75s/it] 40%|███▉ | 206/520 [13:02<19:42, 3.77s/it] {'loss': 1.7618, 'grad_norm': 0.0008604953950460077, 'learning_rate': 0.6884585998105026, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:02<19:42, 3.77s/it] 40%|███▉ | 207/520 [13:06<19:39, 3.77s/it] {'loss': 1.9172, 'grad_norm': 0.0012995990255675269, 'learning_rate': 0.685568161042219, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:06<19:39, 3.77s/it] 40%|████ | 208/520 [13:10<19:29, 3.75s/it] {'loss': 1.6796, 'grad_norm': 0.0009613851017153854, 'learning_rate': 0.6826705121831976, 'epoch': 0.4} + 40%|████ | 208/520 [13:10<19:29, 3.75s/it] 40%|████ | 209/520 [13:14<19:19, 3.73s/it] {'loss': 1.6479, 'grad_norm': 0.0009919040548917232, 'learning_rate': 0.6797657658190838, 'epoch': 0.4} + 40%|████ | 209/520 [13:14<19:19, 3.73s/it] 40%|████ | 210/520 [13:17<19:11, 3.71s/it] {'loss': 1.6794, 'grad_norm': 0.0009843062619107011, 'learning_rate': 0.6768540348112907, 'epoch': 0.4} + 40%|████ | 210/520 [13:17<19:11, 3.71s/it] 41%|████ | 211/520 [13:21<19:05, 3.71s/it] {'loss': 1.6933, 'grad_norm': 0.0006899753536791462, 'learning_rate': 0.6739354322926135, 'epoch': 0.41} + 41%|████ | 211/520 [13:21<19:05, 3.71s/it] 41%|████ | 212/520 [13:25<19:01, 3.71s/it] {'loss': 1.6689, 'grad_norm': 0.000940652290561658, 'learning_rate': 0.6710100716628344, 'epoch': 0.41} + 41%|████ | 212/520 [13:25<19:01, 3.71s/it]
\ No newline at end of file
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log
new file mode 100644
index 0000000000000000000000000000000000000000..47c9d70f22d0e15241658ed68e81dd21989140db
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log
@@ -0,0 +1,312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log
+Timestamp: 2025-10-10 05:57:02
+=====================================
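The learning_rate values logged in the run above trace the cosine-with-warmup schedule requested by --lr_scheduler_type cosine --warmup_ratio 0.03: with 520 optimizer steps, warmup is ceil(0.03 * 520) = 16 steps. A minimal sketch of that schedule, assuming transformers' get_cosine_schedule_with_warmup formula (this is not the training code itself):

```python
# Sketch of the cosine-with-warmup multiplier implied by the flags above
# (assumes transformers' get_cosine_schedule_with_warmup with num_cycles=0.5).
import math

def cosine_multiplier(step: int, total: int = 520, warmup: int = 16) -> float:
    if step < warmup:
        return step / max(1, warmup)  # linear warmup
    progress = (step - warmup) / max(1, total - warmup)
    return 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay toward 0

print(round(cosine_multiplier(32), 7))   # 0.9975154 -- matches step 32 in the log
print(round(cosine_multiplier(184), 7))  # 0.75      -- matches step 184 in the log
```

The exact agreement with the logged values (e.g. step 184 logs learning_rate 0.75) confirms the 16-step warmup over 520 total steps.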
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 05:57:04,790] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:07,488] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-10 05:57:07,490] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
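The --world_info value in the cmd line above is base64-encoded JSON describing the launch topology. Decoding it (a quick illustrative snippet, not part of the project) recovers the same mapping that launch.py prints below as WORLD INFO DICT:

```python
# Decode the DeepSpeed launcher's --world_info payload (base64-encoded JSON).
import base64
import json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} -> one node, eight local ranks
```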
+[2025-10-10 05:57:10,112] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:11,166] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 05:57:11,167] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 05:57:11,167] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 05:57:11,167] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 05:57:11,167] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 05:57:11,167] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 05:57:11,167] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 05:57:11,169] [INFO] [launch.py:253:main] process 1775550 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:57:11,171] [INFO] [launch.py:253:main] process 1775551 spawned with command: [identical to rank 0 above, except '--local_rank=1']
+[2025-10-10 05:57:11,173] [INFO] [launch.py:253:main] process 1775552 spawned with command: [identical to rank 0 above, except '--local_rank=2']
+[2025-10-10 05:57:11,175] [INFO] [launch.py:253:main] process 1775553 spawned with command: [identical to rank 0 above, except '--local_rank=3']
+[2025-10-10 05:57:11,177] [INFO] [launch.py:253:main] process 1775554 spawned with command: [identical to rank 0 above, except '--local_rank=4']
+[2025-10-10 05:57:11,179] [INFO] [launch.py:253:main] process 1775555 spawned with command: [identical to rank 0 above, except '--local_rank=5']
+[2025-10-10 05:57:11,181] [INFO] [launch.py:253:main] process 1775556 spawned with command: [identical to rank 0 above, except '--local_rank=6']
+[2025-10-10 05:57:11,183] [INFO] [launch.py:253:main] process 1775557 spawned with command: [identical to rank 0 above, except '--local_rank=7']
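The mask flags in these commands (--mask_type_* soft, --init_mean_* 3.0, --temperature_* 0.5) describe soft subnetwork masks over the LLM and connector. The repo's actual mask implementation is not shown in this log; a minimal sketch of one common parameterization (a sigmoid gate with temperature, all names hypothetical) illustrates what such flags typically control:

```python
# Hypothetical soft-mask layer (NOT TinyLLaVA's code): mask logits start at
# init_mean, so sigmoid(3.0 / 0.5) ~= 0.998 and training begins with the
# masks almost fully open; lower temperatures sharpen the gate toward 0/1.
import torch
import torch.nn.functional as F

class SoftMaskedLinear(torch.nn.Linear):
    def __init__(self, in_features, out_features, init_mean=3.0, temperature=0.5):
        super().__init__(in_features, out_features)
        self.mask_logits = torch.nn.Parameter(
            torch.full((out_features, in_features), init_mean))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.mask_logits / self.temperature)  # soft gate in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)
```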
+[2025-10-10 05:57:17,634] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:17,967] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:18,000] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:18,022] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:18,022] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:18,023] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:18,023] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:18,048] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:57:18,052] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:57:18,370] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:57:18,406] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:57:18,429] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:57:18,429] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:57:18,429] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 05:57:18,430] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:57:18,431] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:57:18,453] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed. 
+
+The above exception was the direct cause of the following exception:
+
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
+    train()
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train
+    model = training_recipe.load(model, model_args)
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load
+    model.load_llm(**model_args['llm'])
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm
+    self.language_model = self.language_model.from_pretrained(
+  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained
+    resolved_config_file = cached_file(
+  File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file
+    raise EnvironmentError(
+OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
+[2025-10-10 05:57:56,238] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775550
+[2025-10-10 05:57:56,576] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775551
+[2025-10-10 05:57:56,994] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775552
+[2025-10-10 05:57:56,996] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775553
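The traceback above is worth unpacking: `load_llm` appends `language_model` to `--pretrained_model_path` and hands the result to `from_pretrained`. Because `/nfs/ywang29/tinyLLaVA/checkpoints/.../language_model` does not exist as a local directory, transformers falls back to interpreting the string as a Hub repo id, and `validate_repo_id` rejects the absolute path, which surfaces as the HFValidationError and the final OSError. The retried run in the next log passes `/nfs/ywang29/TinyLLaVA/checkpoints/...` instead, so the lower-case `tinyLLaVA` prefix here was likely the culprit on a case-sensitive filesystem. A minimal sketch of a pre-flight check that fails fast with a clearer message (the helper `resolve_pretrained_subdir` is hypothetical, not part of TinyLLaVA):

```python
import os

def resolve_pretrained_subdir(pretrained_model_path: str, subdir: str) -> str:
    """Join <pretrained_model_path>/<subdir> and verify it exists locally,
    so from_pretrained never falls through to Hub repo-id validation."""
    candidate = os.path.join(pretrained_model_path, subdir)
    if not os.path.isdir(candidate):
        # Mirrors the failure above: a missing or mis-cased checkpoint dir.
        raise FileNotFoundError(
            f"Expected a local checkpoint directory at {candidate!r}; "
            "check the spelling and case of --pretrained_model_path."
        )
    return candidate

# Hypothetical usage mirroring modeling_tinyllava.load_llm:
#   path = resolve_pretrained_subdir(pretrained_model_path, "language_model")
#   language_model = AutoModelForCausalLM.from_pretrained(path)
```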
+[2025-10-10 05:57:57,453] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775554 +[2025-10-10 05:57:57,455] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775555 +[2025-10-10 05:57:57,455] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775556 +[2025-10-10 05:57:57,792] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1775557 +[2025-10-10 05:57:57,794] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055702.log +Timestamp: 2025-10-10 05:57:59 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055853.log 
b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055853.log new file mode 100644 index 0000000000000000000000000000000000000000..9d51953f51eb96295f9b1ded341ff272248af519 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055853.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055853.log +Timestamp: 2025-10-10 05:58:53 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 05:58:55,997] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:58,692] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 05:58:58,693] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1 --weight_decay 0. 
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 05:59:01,316] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:02,339] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-10 05:59:02,339] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-10 05:59:02,339] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-10 05:59:02,339] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-10 05:59:02,339] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-10 05:59:02,339] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-10 05:59:02,339] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-10 05:59:02,342] [INFO] [launch.py:253:main] process 1779322 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 05:59:02,344] [INFO] [launch.py:253:main] process 1779323 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', ...]
+[2025-10-10 05:59:02,346] [INFO] [launch.py:253:main] process 1779324 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', ...]
+[2025-10-10 05:59:02,348] [INFO] [launch.py:253:main] process 1779325 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', ...]
+[2025-10-10 05:59:02,350] [INFO] [launch.py:253:main] process 1779326 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', ...]
+[2025-10-10 05:59:02,352] [INFO] [launch.py:253:main] process 1779327 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', ...]
+[2025-10-10 05:59:02,354] [INFO] [launch.py:253:main] process 1779328 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', ...]
+[2025-10-10 05:59:02,356] [INFO] [launch.py:253:main] process 1779329 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
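The eight spawn lines above are identical except for `--local_rank=N` and the subprocess id (the repeated argument arrays are elided as `...`): the DeepSpeed launcher builds one worker command per GPU in `CUDA_VISIBLE_DEVICES` and forks them locally. A minimal sketch of that fan-out pattern, illustrative only and not the actual deepspeed.launcher source:

```python
import subprocess
import sys

def spawn_workers(train_args: list[str], num_local_procs: int) -> list[subprocess.Popen]:
    """Fork one training process per local rank, mirroring the
    launch.py log lines: identical argv except for --local_rank."""
    procs = []
    for rank in range(num_local_procs):
        cmd = [sys.executable, "-u", "tinyllava/train/train.py",
               f"--local_rank={rank}", *train_args]
        procs.append(subprocess.Popen(cmd))
    return procs

# e.g. spawn_workers(["--deepspeed", "./scripts/zero3.json"], num_local_procs=8)
```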
+ import pynvml # type: ignore[import]
+[2025-10-10 05:59:08,933] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:09,133] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:09,262] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:09,263] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:09,325] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:09,325] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:09,325] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:09,341] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:59:09,350] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 05:59:09,530] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:59:09,530] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 05:59:09,664] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:59:09,668] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:59:09,726] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:59:09,728] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:59:09,729] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 05:59:09,753] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
+ywang29-vrdb-test1-worker-0:1779322:1779322 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1779322:1779322 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1779322:1779322 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1779322:1779322 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1779322:1779322 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1779322:1779322 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO ncclCommInitRank comm 0x55befe67cf80 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xa84e227216946c6b - Init START
+ywang29-vrdb-test1-worker-0:1779323:1780958 [1] NCCL INFO ncclCommInitRank comm 0x555e772a0dc0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xa84e227216946c6b - Init START
+ywang29-vrdb-test1-worker-0:1779324:1780953 [2] NCCL INFO ncclCommInitRank comm 0x563c6bc502e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xa84e227216946c6b - Init START
+ywang29-vrdb-test1-worker-0:1779325:1780954 [3] NCCL INFO ncclCommInitRank comm 0x555a0af25360 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xa84e227216946c6b - Init START
+ywang29-vrdb-test1-worker-0:1779326:1780951 [4] NCCL INFO ncclCommInitRank comm 0x5646a4ffeed0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xa84e227216946c6b - Init START
+ywang29-vrdb-test1-worker-0:1779327:1780952 [5] NCCL INFO ncclCommInitRank comm 0x55566b6c0390 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xa84e227216946c6b - Init START
+ywang29-vrdb-test1-worker-0:1779328:1780959 [6] NCCL INFO ncclCommInitRank comm 0x5559adef75e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xa84e227216946c6b - Init START
+ywang29-vrdb-test1-worker-0:1779329:1780947 [7] NCCL INFO ncclCommInitRank comm 0x55b2dde47fb0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xa84e227216946c6b - Init START
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO comm 0x55befe67cf80 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:1779323:1780958 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779323:1780958 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779324:1780953 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779325:1780954 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779326:1780951 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779327:1780952 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779328:1780959 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779329:1780947 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
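The rings and trees being wired up here are built lazily when each rank enters its first collective after creating the process group. A minimal sketch of a distributed setup that triggers exactly this kind of NCCL initialization, assuming a generic `torchrun`-style launch rather than the DeepSpeed launcher this run actually used:

```python
import os
import torch
import torch.distributed as dist

# NCCL reads its interface from the environment, matching the
# "NCCL_SOCKET_IFNAME set by environment to eth" lines above.
os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")

def main() -> None:
    # torchrun sets RANK / WORLD_SIZE / LOCAL_RANK; init_process_group
    # creates the NCCL communicator (the ncclCommInitRank lines above).
    dist.init_process_group(backend="nccl")
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)

    # The first collective establishes the ring/tree channels logged here.
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x, op=dist.ReduceOp.SUM)
    print(f"rank {dist.get_rank()}: sum = {x.item()}")

    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```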
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1779323:1780958 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1779324:1780953 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1779325:1780954 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1779326:1780951 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1779327:1780952 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1779328:1780959 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1779329:1780947 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1779323:1780958 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779324:1780953 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779325:1780954 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779326:1780951 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779327:1780952 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779328:1780959 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779329:1780947 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1779329:1780947 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1779326:1780951 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1779326:1780951 [4] NCCL INFO ncclCommInitRank comm 0x5646a4ffeed0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xa84e227216946c6b - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1779329:1780947 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1779327:1780952 [5] NCCL INFO ncclCommInitRank comm 0x55566b6c0390 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xa84e227216946c6b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1779328:1780959 [6] NCCL INFO ncclCommInitRank comm 0x5559adef75e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xa84e227216946c6b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1779325:1780954 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1779329:1780947 [7] NCCL INFO ncclCommInitRank comm 0x55b2dde47fb0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xa84e227216946c6b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1779324:1780953 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1779325:1780954 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1779323:1780958 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1779324:1780953 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1779323:1780958 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1779325:1780954 [3] NCCL INFO ncclCommInitRank comm 0x555a0af25360 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xa84e227216946c6b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1779324:1780953 [2] NCCL INFO ncclCommInitRank comm 0x563c6bc502e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xa84e227216946c6b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1779322:1780944 [0] NCCL INFO ncclCommInitRank comm 0x55befe67cf80 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xa84e227216946c6b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1779323:1780958 [1] NCCL INFO ncclCommInitRank comm 0x555e772a0dc0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xa84e227216946c6b - Init COMPLETE +[2025-10-10 05:59:58,237] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[... the same "Some weights of Qwen2ForCausalLM were not initialized" warning is printed once per rank; five verbatim copies omitted ...]
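For context on the warning above: every listed tensor is a `scores` attribute attached to an attention or MLP projection (q/k/v/o, gate/up/down), which is consistent with each `nn.Linear` in the language model being wrapped by a learnable soft-mask layer. The pretrained checkpoint has no such keys, so `from_pretrained` initializes them fresh and each rank prints this warning. The following is a minimal sketch of how such a layer could look; `MaskedLinear`, `init_mean`, and `temperature` are illustrative assumptions, not the repository's actual code.

import torch
import torch.nn as nn
import torch.nn.functional as F

class MaskedLinear(nn.Linear):
    """Hypothetical linear layer with a learnable soft mask over its weights.

    The `scores` parameter mirrors the names in the warning above; it exists
    only in the wrapped model, never in the base checkpoint, which is exactly
    why Transformers reports it as newly initialized.
    """
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # One mask logit per weight entry, trained during mask tuning.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        # Soft mask in (0, 1); a lower temperature sharpens the gate.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

Loading the pretrain checkpoint into a model built from such layers leaves every `scores` key unmatched, so the warning (and the "You should probably TRAIN this model" hint) is expected here rather than a sign of a corrupted checkpoint: the mask scores are precisely the parameters this run is meant to train.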
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 05:59:59,982] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
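The module dump above is where the masking flags from the launch command become visible: every attention projection (q/k/v/o) and MLP projection (gate/up/down) in the Qwen2 language model, and both linear layers of the MLP connector, have been replaced by SupermaskLinearSparsity_SoftForward_Normal, while the SigLIP vision tower keeps plain Linear layers (it is frozen and excluded by --mask_model llm-connector). The source of that class is not part of this log, so what follows is only a minimal sketch of the usual soft-forward supermask formulation it appears to implement: a learned per-weight scores tensor (the '*.scores' parameters reported as newly initialized above) is passed through a temperature-scaled sigmoid and multiplied into the weight. The class name, constructor arguments, and exact parameterization below are assumptions for illustration.

    # Hypothetical sketch of a soft-forward supermask linear layer; not the
    # repository's actual implementation.
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SupermaskLinearSoft(nn.Linear):
        def __init__(self, in_features, out_features, bias=True,
                     init_mean=3.0, temperature=0.5):
            # init_mean / temperature mirror the --init_mean_* and
            # --temperature_* flags in the launch command (assumed mapping).
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # One learnable score per weight entry; these are the '*.scores'
            # tensors the checkpoint warning lists as newly initialized.
            self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

        def forward(self, x):
            # Soft mask in (0, 1); gradients flow through the sigmoid as in
            # ordinary backprop, which is plausibly what backward_type
            # 'normal' refers to (assumption).
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.weight * mask, self.bias)

Under this parameterization the 'Pre-training init ... Mean=3.000000' lines below correspond to --init_mean_* 3.0, and with --temperature_* 0.5 the initial gate is sigmoid(3.0 / 0.5) = sigmoid(6.0) ≈ 0.998 per weight, i.e. the run starts from an effectively unmasked network and sparsity emerges only as scores are pushed down during tuning. The connector means of 3.000005 and 2.999970 suggest a small amount of noise around the same target mean.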
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module> + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train + data_module = make_supervised_data_module(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module + train_dataset = LazySupervisedDataset(tokenizer=tokenizer, + File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__ + list_data_dict = json.load(open(data_path, "r")) +FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json' 
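The traceback shows the run dies during dataset construction, before any training step: per the stack, LazySupervisedDataset.__init__ simply calls json.load(open(data_path, "r")) on the --data_path argument, and the same FileNotFoundError was raised on every rank, after which the launcher killed all eight subprocesses (below). Note that the missing path uses lowercase 'tinyLLaVA' ('/nfs/ywang29/tinyLLaVA/dataset/...') while the checkpoints that load successfully live under '/nfs/ywang29/TinyLLaVA/...', which is consistent with a case typo in the launch script; the retry logged below (the 060227 run) side-steps it by pointing --data_path at '/root/dataset' instead. A pre-flight guard of roughly this shape, run before deepspeed spawns its workers, would surface the problem once instead of once per rank (a hypothetical helper, not part of TinyLLaVA):

    import os
    import sys

    def check_data_path(data_path: str) -> None:
        # Fail fast, before the launcher forks one worker per GPU.
        if os.path.exists(data_path):
            return
        # Walk up to the deepest ancestor that exists, to show where the
        # path first diverges (useful for spotting case mismatches on NFS).
        parent = os.path.dirname(data_path)
        while parent not in ("", "/") and not os.path.exists(parent):
            parent = os.path.dirname(parent)
        sys.exit(f"--data_path not found: {data_path!r}; "
                 f"deepest existing ancestor: {parent!r} "
                 "(check capitalization, e.g. tinyLLaVA vs TinyLLaVA)")

    check_data_path('/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json')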
+[2025-10-10 06:00:02,432] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1779322 +[2025-10-10 06:00:02,927] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1779323 +[2025-10-10 06:00:03,100] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1779324 +[2025-10-10 06:00:03,102] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1779325 +[2025-10-10 06:00:03,103] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1779326 +[2025-10-10 06:00:03,104] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1779327 +[2025-10-10 06:00:03,105] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1779328 +[2025-10-10 06:00:03,106] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1779329 +[2025-10-10 06:00:03,107] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', 
'--train_data_ratio', '0.1'] exits with return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_055853.log +Timestamp: 2025-10-10 06:00:04 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_060227.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_060227.log new file mode 100644 index 0000000000000000000000000000000000000000..9656236ba7b9f45d6b07014a99a48d9a3b09eb77 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_060227.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_060227.log +Timestamp: 2025-10-10 06:02:27 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:02:30,295] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:32,941] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:02:32,942] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1 --weight_decay 0. 
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:02:35,578] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:02:36,675] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:02:36,675] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:02:36,675] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:02:36,675] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:02:36,675] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:02:36,675] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:02:36,675] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:02:36,677] [INFO] [launch.py:253:main] process 1785296 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', 
'--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:02:36,679] [INFO] [launch.py:253:main] process 1785297 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', 
'--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:02:36,681] [INFO] [launch.py:253:main] process 1785298 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:02:36,683] [INFO] [launch.py:253:main] process 1785299 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 
'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:02:36,686] [INFO] [launch.py:253:main] process 1785300 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 
'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 06:02:36,688] [INFO] [launch.py:253:main] process 1785301 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', ...] (remaining arguments identical to the rank-4 command above)
+[2025-10-10 06:02:36,690] [INFO] [launch.py:253:main] process 1785302 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', ...] (remaining arguments identical to the rank-4 command above)
+[2025-10-10 06:02:36,692] [INFO] [launch.py:253:main] process 1785303 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', ...] (remaining arguments identical to the rank-4 command above)
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 06:02:43,019] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:02:43,435] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:02:43,921] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+TinyLlavaConfig {
+ "backward_type_connector": "normal",
+ "cache_dir": null,
+ "connector_type": "mlp2x_gelu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "square",
+ "image_token_index": -200,
+ "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "mask_model": [
+ "llm",
+ "connector"
+ ],
+ "mask_type_connector": "soft",
+ "model_type": "tinyllava",
+ "num_queries": 128,
+ "num_resampler_layers": 3,
+ "pad_token": null,
+ "resampler_hidden_size": 768,
+ "sparsity_connector": null,
+ "subnet_type_connector": "global",
+ "temperature_connector": 0.5,
+ "text_config": {
+ "_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "backward_type": "normal",
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_size": 896,
+ "intermediate_size": 4864,
+ "mask_type": "soft",
+ "masked_layers": "all",
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "subnet_mode": "both",
+ "subnet_type": "None",
+ "temperature_attn": 0.5,
+ "temperature_mlp": 0.5,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ },
+ "threshold_connector": null,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "tokenizer_padding_side": "right",
+ "tokenizer_use_fast": false,
+ "transformers_version": "4.40.1",
+ "tune_type_connector": "frozen",
+ "tune_type_llm": "frozen",
+ "tune_type_vision_tower": "frozen",
+ "tune_vision_tower_from_layer": -1,
+ "use_cache": false,
+ "vision_config": {
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "image_size": 384,
+ "intermediate_size": 4304,
+ "layer_norm_eps": 1e-06,
+ "model_name_or_path": "google/siglip-so400m-patch14-384",
+ "model_name_or_path2": "",
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "patch_size": 14
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "patch",
+ "vision_hidden_size": 1152,
+ "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+ "vision_model_name_or_path2": "",
+ "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
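
The mask-tuning settings dumped above ('mask_type': 'soft', temperature 0.5, '--init_mean' 3.0) describe a soft, temperature-controlled subnetwork over the LLM and connector. As a rough illustration only — the actual TinyLLaVA masking code is not shown in this log, and every name below is invented — a soft weight mask with a learnable per-weight score initialized at init_mean might look like this sketch:

    # Hypothetical sketch of a temperature-controlled soft weight mask
    # (not the actual TinyLLaVA implementation; names are invented).
    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class SoftMaskedLinear(nn.Module):
        def __init__(self, in_features, out_features, init_mean=3.0, temperature=0.5):
            super().__init__()
            self.linear = nn.Linear(in_features, out_features)
            # One learnable score per weight, initialized to init_mean (3.0 here),
            # so the initial gate sigmoid(3.0 / 0.5) ~= 0.998 starts almost fully open.
            self.scores = nn.Parameter(torch.full_like(self.linear.weight, init_mean))
            self.temperature = temperature

        def forward(self, x):
            # A lower temperature sharpens the gate toward a hard 0/1 mask.
            mask = torch.sigmoid(self.scores / self.temperature)
            return F.linear(x, self.linear.weight * mask, self.linear.bias)

    layer = SoftMaskedLinear(896, 4864)  # Qwen2.5-0.5B hidden -> intermediate size
    out = layer(torch.randn(2, 896))
    print(out.shape)  # torch.Size([2, 4864])

Under this reading, the otherwise unusual '--learning_rate 1' in the launch command would plausibly be the step size for the mask scores rather than for raw weights, though the log itself does not confirm that.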
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:1785296:1785296 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1785296:1785296 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1785296:1785296 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1785296:1785296 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1785296:1785296 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1785296:1785296 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO Using network Socket
+(ranks 1-7 emit the same per-rank warning, bootstrap, and NET/Socket sequence)
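
The bootstrap lines show NCCL falling back to plain TCP sockets over eth0: no InfiniBand device is found and NCCL_SOCKET_IFNAME is pinned to eth. If one wanted to reproduce this environment, the pin is just an environment variable set before process-group initialization; a minimal sketch, assuming the launcher supplies rank and rendezvous variables as DeepSpeed does here:

    # Minimal sketch: pin NCCL to a specific interface and raise log verbosity
    # before initializing the process group (interface name taken from the log).
    import os
    import torch.distributed as dist

    os.environ["NCCL_SOCKET_IFNAME"] = "eth"  # matches "set by environment to eth" above
    os.environ["NCCL_DEBUG"] = "INFO"         # produces the NCCL INFO lines seen here

    # MASTER_ADDR, MASTER_PORT, RANK, and WORLD_SIZE are assumed to come
    # from the launcher (deepspeed.launcher.launch in this log).
    dist.init_process_group(backend="nccl")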
+ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO ncclCommInitRank comm 0x5623b57dbdc0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x6b4088a672825926 - Init START
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO ncclCommInitRank comm 0x562baccbe560 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x6b4088a672825926 - Init START
+ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO ncclCommInitRank comm 0x5600dd085730 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x6b4088a672825926 - Init START
+ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO ncclCommInitRank comm 0x56381a8e64c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x6b4088a672825926 - Init START
+ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO ncclCommInitRank comm 0x5641a6254620 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x6b4088a672825926 - Init START
+ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO ncclCommInitRank comm 0x56443885e610 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x6b4088a672825926 - Init START
+ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO ncclCommInitRank comm 0x558677daf600 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x6b4088a672825926 - Init START
+ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO ncclCommInitRank comm 0x55a892cdd9c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x6b4088a672825926 - Init START
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff (GPUs 1-3 use the same mask; GPUs 4-7 use ffffff00,0000ffff,ff000000)
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO NVLS multicast support is not available on dev 0 (likewise devs 1-7)
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO comm 0x562baccbe560 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 (analogous lines for ranks 1-7)
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 (channels 01/24 through 23/24 report the same ring order)
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 ... [23] 1/-1/-1->0->-1 (each rank r reports the chain tree r+1/-1/-1->r->r-1 identically on all 24 channels; rank 0 is the root, rank 7 the leaf)
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO P2P Chunksize set to 524288 (all ranks)
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read (each rank connects to its ring successor, 7 wrapping to 0, on all 24 channels via P2P/CUMEM/read)
+ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO Connected all rings (all 8 ranks report the same)
+ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read (tree connections back toward lower ranks continue likewise on all channels)
+ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL 
INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL 
INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1785297:1786939 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1785300:1786941 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO 
threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1785296:1786938 [0] NCCL INFO ncclCommInitRank comm 0x562baccbe560 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x6b4088a672825926 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1785299:1786942 [3] NCCL INFO ncclCommInitRank comm 0x5623b57dbdc0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x6b4088a672825926 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1785298:1786948 [2] NCCL INFO ncclCommInitRank comm 0x56381a8e64c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x6b4088a672825926 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1785301:1786943 [5] NCCL INFO ncclCommInitRank comm 0x5641a6254620 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x6b4088a672825926 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1785302:1786940 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1785303:1786944 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
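All eight ranks report ncclCommInitRank Init COMPLETE on the same commId (0x6b4088a672825926), i.e. one 8-GPU NCCL communicator on this node; the "P2P/CUMEM/read" transport indicates peers reading each other's buffers directly through CUDA's cuMem* allocation path rather than staging through host memory. These INFO lines only appear when NCCL debug logging (e.g. NCCL_DEBUG=INFO) is active, which the run apparently had set. A minimal sketch, not part of this repo (hypothetical file name nccl_probe.py, launched as `NCCL_DEBUG=INFO torchrun --nproc_per_node=8 nccl_probe.py`), that reproduces this kind of output:

import torch
import torch.distributed as dist

def main():
    # torchrun provides RANK/WORLD_SIZE/MASTER_ADDR; NCCL_DEBUG=INFO in the
    # environment makes NCCL print the same INFO lines seen in this log.
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())
    x = torch.ones(1, device="cuda")
    # The first collective triggers channel construction: the "Channel NN/0 :
    # r[r] -> s[s] via P2P/CUMEM/read" lines, then "Connected all rings/trees"
    # and finally "ncclCommInitRank ... Init COMPLETE" on every rank.
    dist.all_reduce(x)
    if dist.get_rank() == 0:
        print(f"all_reduce over {dist.get_world_size()} ranks:", x.item())
    dist.destroy_process_group()

if __name__ == "__main__":
    main()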
+[2025-10-10 06:03:29,816] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: [the 168 mask-score tensors model.layers.{0..23}.self_attn.{q,k,v,o}_proj.scores and model.layers.{0..23}.mlp.{gate,up,down}_proj.scores]
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
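The pretrained checkpoint under language_model holds only the original Qwen2.5-0.5B weights, so Transformers creates every `scores` tensor fresh — which is exactly what this warning reports, and why it advises training before inference. Given the launch flags (--mask_type_text soft, --init_mean_text 1.0, temperatures of 0.3 for attention and MLP), a plausible reading is a learnable per-weight score turned into a soft mask by a tempered sigmoid. A hedged sketch of that idea (SoftMaskedLinear is a hypothetical name, not this repo's class):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Linear layer whose weight is gated by a learnable soft mask (sketch)."""

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # Extra parameter with no counterpart in the pretrained checkpoint;
        # loading therefore reports it under "newly initialized", as above.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

Wrapping the q/k/v/o attention projections and the gate/up/down MLP projections in all 24 layers of Qwen2.5-0.5B would yield exactly the 168 `.scores` tensors enumerated in the warning.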
+[... the same "Some weights of Qwen2ForCausalLM were not initialized ..." warning and full parameter list are printed verbatim by the remaining ranks; only the final repeat is kept below, with its list elided ...]
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', [... same 168 mask-score tensors as above ...] 'model.layers.8.self_attn.q_proj.scores',
'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 
'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 
'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 
'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 
'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 
'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 
'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 06:03:31,529] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
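The "newly initialized" warning above is expected for this run: each masked linear layer carries an extra per-weight `scores` tensor that does not exist in the pretrained checkpoint, so Transformers initializes it fresh and warns. A minimal sketch of what such a layer could look like, assuming sigmoid gating with the temperature 0.3 from the launch flags and the score init of 3.0 seen in the "Pre-training init ... Mean=3.000000" lines below; the project's actual SupermaskLinearSparsity_SoftForward_Normal implementation is not included in this log, so the class below is purely illustrative:

# Sketch only: a plausible soft-masked linear layer consistent with the
# tensor names and shapes in this log; not the project's actual code.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Linear layer whose frozen weights are gated by trainable scores."""

    def __init__(self, in_features, out_features, bias=True,
                 temperature=0.3, init_mean=3.0):
        super().__init__(in_features, out_features, bias=bias)
        # Per-weight mask logits; absent from the pretrained checkpoint,
        # which is what triggers the "newly initialized" warning above.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature
        self.weight.requires_grad = False  # only the mask scores are tuned
        if self.bias is not None:
            self.bias.requires_grad = False

    def forward(self, x):
        # Soft mask in (0, 1); a small temperature sharpens the sigmoid.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)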
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-10 06:03:44,483 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-10 06:03:44,483 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters 
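Each `.scores` entry above has exactly as many elements as the weight matrix it masks: for Qwen2.5-0.5B, hidden size 896 gives 896x896 = 802816 for q_proj/o_proj, 128x896 = 114688 for the GQA k/v projections (2 KV heads x head_dim 64), and 4864x896 = 4358144 for the MLP projections; the mlp2x_gelu connector maps SigLIP-so400m's width 1152 to 896, giving the 1032192 and 802816 connector counts. The snippet below is only an illustrative sketch of such a per-weight soft mask: `SoftMaskedLinear` and its forward pass are hypothetical, with just the tensor shapes and the `mask_type soft` / temperature 0.3 / init_mean 1.0 settings taken from the launch command earlier in this log.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    """Hypothetical sketch: one learnable score per weight entry, squashed by a
    temperature sigmoid into a soft mask over the underlying linear weight."""

    def __init__(self, linear: nn.Linear, temperature: float = 0.3, init_mean: float = 1.0):
        super().__init__()
        self.linear = linear
        self.temperature = temperature
        # scores has linear.weight.shape elements, matching the counts logged above
        self.scores = nn.Parameter(torch.full_like(linear.weight, init_mean))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.linear.weight * mask, self.linear.bias)

# Qwen2.5-0.5B / SigLIP-so400m shapes reproduce the logged score counts:
assert 896 * 896 == 802816      # self_attn q_proj / o_proj (and connector._connector.2)
assert 128 * 896 == 114688      # self_attn k_proj / v_proj (2 KV heads * head_dim 64)
assert 4864 * 896 == 4358144    # mlp gate_proj / up_proj / down_proj
assert 1152 * 896 == 1032192    # connector._connector.0 (vision width 1152 -> LLM width 896)
```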
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%| 0/520 [00:00<?, ?it/s]
+[... several hundred NCCL INFO lines elided: the 8 local ranks (GPUs 0-7, pids 1785296-1785303 on ywang29-vrdb-test1-worker-0) build 24 collective channels over the ring 0 -> 1 -> ... -> 7 -> 0 and matching chain trees, P2P chunksize 524288, every peer link via P2P/CUMEM/read; all ranks report "Connected all rings", "Connected all trees", threadThresholds 8/8/64 | 64/8/64 | 512 | 512, and ncclCommInitRank Init COMPLETE (commId 0xaf2aeebab1bc555a, nranks 8) ...]
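Before the step records resume, one thing they make checkable: the logged `learning_rate` climbs linearly from 0.0625 to 1.0 over the first 16 steps (warmup_ratio 0.03 of 520 optimizer steps) and then decays along a cosine, consistent with `--lr_scheduler_type cosine`. The sketch below is an assumption-level reconstruction using the standard warmup-then-cosine formula (as in HF Transformers' `get_cosine_schedule_with_warmup`), not code from this run. Note the logged peak is 1.0 rather than the 2e-1 passed via `--learning_rate`, so the log is presumably reporting the schedule's multiplier or a differently scaled parameter group; this excerpt doesn't say which.

```python
import math

def cosine_with_warmup(step: int, warmup: int = 16, total: int = 520) -> float:
    """Reproduces the 'learning_rate' values in the step logs below,
    assuming num_warmup_steps = 16 (~ 0.03 * 520) and 520 total steps."""
    if step < warmup:
        return step / warmup                      # linear warmup
    progress = (step - warmup) / (total - warmup)
    return 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay to 0

assert cosine_with_warmup(1) == 0.0625                         # step 1 in the log
assert cosine_with_warmup(16) == 1.0                           # warmup peak, step 16
assert abs(cosine_with_warmup(17) - 0.999990286465769) < 1e-9  # first decay step
```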
'learning_rate': 0.0625, 'epoch': 0.0} + 0%| | 1/520 [00:12<1:49:15, 12.63s/it] 0%| | 2/520 [00:16<1:04:54, 7.52s/it] {'loss': 2.0549, 'grad_norm': 0.0052480884746959515, 'learning_rate': 0.125, 'epoch': 0.0} + 0%| | 2/520 [00:16<1:04:54, 7.52s/it] 1%| | 3/520 [00:20<50:30, 5.86s/it] {'loss': 2.1899, 'grad_norm': 0.0060066371312731, 'learning_rate': 0.1875, 'epoch': 0.01} + 1%| | 3/520 [00:20<50:30, 5.86s/it] 1%| | 4/520 [00:24<43:47, 5.09s/it] {'loss': 1.6695, 'grad_norm': 0.0014955110778293332, 'learning_rate': 0.25, 'epoch': 0.01} + 1%| | 4/520 [00:24<43:47, 5.09s/it] 1%| | 5/520 [00:28<40:12, 4.68s/it] {'loss': 1.6614, 'grad_norm': 0.0008065683230410209, 'learning_rate': 0.3125, 'epoch': 0.01} + 1%| | 5/520 [00:28<40:12, 4.68s/it] 1%| | 6/520 [00:32<37:56, 4.43s/it] {'loss': 1.3801, 'grad_norm': 0.0006428674068243615, 'learning_rate': 0.375, 'epoch': 0.01} + 1%| | 6/520 [00:32<37:56, 4.43s/it] 1%|▏ | 7/520 [00:36<36:31, 4.27s/it] {'loss': 1.4168, 'grad_norm': 0.0009206442279949906, 'learning_rate': 0.4375, 'epoch': 0.01} + 1%|▏ | 7/520 [00:36<36:31, 4.27s/it] 2%|▏ | 8/520 [00:40<37:17, 4.37s/it] {'loss': 1.4586, 'grad_norm': 0.0011377792339617228, 'learning_rate': 0.5, 'epoch': 0.02} + 2%|▏ | 8/520 [00:40<37:17, 4.37s/it] 2%|▏ | 9/520 [00:45<37:55, 4.45s/it] {'loss': 1.5352, 'grad_norm': 0.0020949868807409277, 'learning_rate': 0.5625, 'epoch': 0.02} + 2%|▏ | 9/520 [00:45<37:55, 4.45s/it] 2%|▏ | 10/520 [00:49<37:04, 4.36s/it] {'loss': 1.4031, 'grad_norm': 0.003053266839036213, 'learning_rate': 0.625, 'epoch': 0.02} + 2%|▏ | 10/520 [00:49<37:04, 4.36s/it] 2%|▏ | 11/520 [00:53<36:09, 4.26s/it] {'loss': 1.5696, 'grad_norm': 0.00415563291501255, 'learning_rate': 0.6875, 'epoch': 0.02} + 2%|▏ | 11/520 [00:53<36:09, 4.26s/it] 2%|▏ | 12/520 [00:57<35:11, 4.16s/it] {'loss': 1.6175, 'grad_norm': 0.007057603209677466, 'learning_rate': 0.75, 'epoch': 0.02} + 2%|▏ | 12/520 [00:57<35:11, 4.16s/it][2025-10-10 06:04:51,539] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:02<36:20, 4.30s/it] {'loss': 2.1985, 'grad_norm': 0.02397083658988974, 'learning_rate': 0.8125, 'epoch': 0.03} + 2%|▎ | 13/520 [01:02<36:20, 4.30s/it] 3%|▎ | 14/520 [01:06<35:17, 4.19s/it] {'loss': 2.7263, 'grad_norm': 0.036215547325151747, 'learning_rate': 0.875, 'epoch': 0.03} + 3%|▎ | 14/520 [01:06<35:17, 4.19s/it] 3%|▎ | 15/520 [01:10<34:40, 4.12s/it] {'loss': 3.5465, 'grad_norm': 0.10071306455046561, 'learning_rate': 0.9375, 'epoch': 0.03} + 3%|▎ | 15/520 [01:10<34:40, 4.12s/it] 3%|▎ | 16/520 [01:13<33:48, 4.03s/it] {'loss': 4.2079, 'grad_norm': 0.15213890693119564, 'learning_rate': 1.0, 'epoch': 0.03} + 3%|▎ | 16/520 [01:13<33:48, 4.03s/it] 3%|▎ | 17/520 [01:17<33:12, 3.96s/it] {'loss': 3.3766, 'grad_norm': 0.0702748795693221, 'learning_rate': 0.999990286465769, 'epoch': 0.03} + 3%|▎ | 17/520 [01:17<33:12, 3.96s/it] 3%|▎ | 18/520 [01:21<32:36, 3.90s/it] {'loss': 5.0409, 'grad_norm': 0.38125360973490907, 'learning_rate': 0.9999611462404874, 'epoch': 0.03} + 3%|▎ | 18/520 [01:21<32:36, 3.90s/it] 4%|▎ | 19/520 [01:25<32:10, 3.85s/it] {'loss': 5.7112, 'grad_norm': 0.09603643943279232, 'learning_rate': 0.9999125804563732, 'epoch': 0.04} + 4%|▎ | 19/520 [01:25<32:10, 3.85s/it] 4%|▍ | 20/520 [01:28<31:44, 3.81s/it] {'loss': 5.9238, 'grad_norm': 0.09774270205510538, 'learning_rate': 0.9998445910004081, 'epoch': 0.04} + 4%|▍ | 20/520 [01:28<31:44, 3.81s/it] 4%|▍ | 21/520 [01:32<31:22, 3.77s/it] {'loss': 4.7826, 'grad_norm': 0.03459056834190745, 'learning_rate': 0.9997571805142638, 'epoch': 0.04} + 4%|▍ | 21/520 [01:32<31:22, 3.77s/it] 4%|▍ | 22/520 [01:36<31:01, 3.74s/it] {'loss': 4.1881, 'grad_norm': 0.0449082274526351, 'learning_rate': 0.9996503523941993, 'epoch': 0.04} + 4%|▍ | 22/520 [01:36<31:01, 3.74s/it] 4%|▍ | 23/520 [01:39<30:55, 3.73s/it] {'loss': 3.9824, 'grad_norm': 0.022400224226156357, 'learning_rate': 0.999524110790929, 'epoch': 0.04} + 4%|▍ | 23/520 [01:39<30:55, 3.73s/it] 5%|▍ | 24/520 [01:43<30:39, 3.71s/it] {'loss': 4.136, 'grad_norm': 0.028380526714726642, 'learning_rate': 0.9993784606094611, 'epoch': 0.05} + 5%|▍ | 24/520 [01:43<30:39, 3.71s/it] 5%|▍ | 25/520 [01:47<30:34, 3.71s/it] {'loss': 3.0455, 'grad_norm': 0.009024640986183498, 'learning_rate': 0.9992134075089083, 'epoch': 0.05} + 5%|▍ | 25/520 [01:47<30:34, 3.71s/it] 5%|▌ | 26/520 [01:51<30:29, 3.70s/it] {'loss': 2.9443, 'grad_norm': 0.006955478815468332, 'learning_rate': 0.999028957902266, 'epoch': 0.05} + 5%|▌ | 26/520 [01:51<30:29, 3.70s/it] 5%|▌ | 27/520 [01:54<30:19, 3.69s/it] {'loss': 2.5354, 'grad_norm': 0.004198422361844466, 'learning_rate': 0.9988251189561644, 'epoch': 0.05} + 5%|▌ | 27/520 [01:54<30:19, 3.69s/it] 5%|▌ | 28/520 [01:58<30:21, 3.70s/it] {'loss': 2.3951, 'grad_norm': 0.0032666384251875645, 'learning_rate': 0.99860189859059, 'epoch': 0.05} + 5%|▌ | 28/520 [01:58<30:21, 3.70s/it] 6%|▌ | 29/520 [02:02<30:21, 3.71s/it] {'loss': 2.3158, 'grad_norm': 0.0032155482347343304, 'learning_rate': 0.9983593054785775, 'epoch': 0.06} + 6%|▌ | 29/520 [02:02<30:21, 3.71s/it] 6%|▌ | 30/520 [02:05<30:19, 3.71s/it] {'loss': 3.1015, 'grad_norm': 0.004124215195085771, 'learning_rate': 0.9980973490458728, 'epoch': 0.06} + 6%|▌ | 30/520 [02:05<30:19, 3.71s/it] 6%|▌ | 31/520 [02:09<30:21, 3.72s/it] {'loss': 2.2854, 'grad_norm': 0.0021722263311536676, 'learning_rate': 0.9978160394705669, 'epoch': 0.06} + 6%|▌ | 
+ 32/520 [02:13, 3.72s/it] {'loss': 3.4843, 'grad_norm': 0.006817262953645244, 'learning_rate': 0.9975153876827008, 'epoch': 0.06}
+ 33/520 [02:17, 3.74s/it] {'loss': 2.1679, 'grad_norm': 0.0024471610074683708, 'learning_rate': 0.9971954053638399, 'epoch': 0.06}
+ 34/520 [02:20, 3.73s/it] {'loss': 2.0889, 'grad_norm': 0.0022075041085025384, 'learning_rate': 0.9968561049466214, 'epoch': 0.07}
+ 35/520 [02:24, 3.73s/it] {'loss': 2.0674, 'grad_norm': 0.0024354500247193533, 'learning_rate': 0.9964974996142697, 'epoch': 0.07}
+ 36/520 [02:28, 3.75s/it] {'loss': 2.2315, 'grad_norm': 0.001795988441412275, 'learning_rate': 0.9961196033000861, 'epoch': 0.07}
+ 37/520 [02:32, 3.73s/it] {'loss': 2.5881, 'grad_norm': 0.0028058571207789738, 'learning_rate': 0.9957224306869052, 'epoch': 0.07}
+ 38/520 [02:35, 3.72s/it] {'loss': 2.2607, 'grad_norm': 0.001545726121779558, 'learning_rate': 0.9953059972065263, 'epoch': 0.07}
+ 39/520 [02:39, 3.71s/it] {'loss': 2.0274, 'grad_norm': 0.0020406501869398535, 'learning_rate': 0.994870319039113, 'epoch': 0.07}
+ 40/520 [02:43, 3.71s/it] {'loss': 2.0196, 'grad_norm': 0.0015024092474075041, 'learning_rate': 0.9944154131125642, 'epoch': 0.08}
+ 41/520 [02:46, 3.71s/it] {'loss': 1.9862, 'grad_norm': 0.0014668915057084017, 'learning_rate': 0.9939412971018573, 'epoch': 0.08}
+ 42/520 [02:50, 3.69s/it] {'loss': 2.0375, 'grad_norm': 0.0018016424262653956, 'learning_rate': 0.9934479894283605, 'epoch': 0.08}
+ 43/520 [02:54, 3.68s/it] {'loss': 2.3216, 'grad_norm': 0.0018361069235314222, 'learning_rate': 0.9929355092591179, 'epoch': 0.08}
+ 44/520 [02:57, 3.68s/it] {'loss': 2.4276, 'grad_norm': 0.0026636585232809504, 'learning_rate': 0.9924038765061041, 'epoch': 0.08}
+ 45/520 [03:01, 3.67s/it] {'loss': 1.9525, 'grad_norm': 0.0012911382454120458, 'learning_rate': 0.9918531118254507, 'epoch': 0.09}
+ 46/520 [03:05, 3.68s/it] {'loss': 2.4686, 'grad_norm': 0.0020949546550287457, 'learning_rate': 0.9912832366166442, 'epoch': 0.09}
+ 47/520 [03:08, 3.67s/it] {'loss': 1.9674, 'grad_norm': 0.0012730711209645463, 'learning_rate': 0.9906942730216939, 'epoch': 0.09}
+ 48/520 [03:12, 3.67s/it] {'loss': 1.8952, 'grad_norm': 0.0012806613982602236, 'learning_rate': 0.9900862439242719, 'epoch': 0.09}
+ 49/520 [03:16, 3.70s/it] {'loss': 1.9075, 'grad_norm': 0.001270661657877234, 'learning_rate': 0.9894591729488242, 'epoch': 0.09}
+ 50/520 [03:19, 3.69s/it] {'loss': 1.9115, 'grad_norm': 0.001477384353391769, 'learning_rate': 0.9888130844596523, 'epoch': 0.1}
+ 51/520 [03:23, 3.68s/it] {'loss': 1.7755, 'grad_norm': 0.0015120221941398383, 'learning_rate': 0.9881480035599667, 'epoch': 0.1}
+ 52/520 [03:27, 3.68s/it] {'loss': 1.967, 'grad_norm': 0.0015619506545197055, 'learning_rate': 0.9874639560909118, 'epoch': 0.1}
+ 53/520 [03:30, 3.69s/it] {'loss': 1.9615, 'grad_norm': 0.001332125842770307, 'learning_rate': 0.9867609686305616, 'epoch': 0.1}
+ 54/520 [03:34, 3.68s/it] {'loss': 1.7929, 'grad_norm': 0.0011283020605915115, 'learning_rate': 0.9860390684928872, 'epoch': 0.1}
+ 55/520 [03:38, 3.70s/it] {'loss': 1.8057, 'grad_norm': 0.0011993165451090716, 'learning_rate': 0.9852982837266955, 'epoch': 0.11}
+ 56/520 [03:42, 3.70s/it] {'loss': 1.9446, 'grad_norm': 0.001129621593822934, 'learning_rate': 0.984538643114539, 'epoch': 0.11}
+ 57/520 [03:45, 3.71s/it] {'loss': 1.7732, 'grad_norm': 0.0011506280981650008, 'learning_rate': 0.9837601761715982, 'epoch': 0.11}
+ 58/520 [03:49, 3.71s/it] {'loss': 1.9447, 'grad_norm': 0.0010450340869732292, 'learning_rate': 0.9829629131445341, 'epoch': 0.11}
+ 59/520 [03:53, 3.70s/it] {'loss': 2.0429, 'grad_norm': 0.001574357325928879, 'learning_rate': 0.9821468850103139, 'epoch': 0.11}
+ 60/520 [03:56, 3.71s/it] {'loss': 1.8774, 'grad_norm': 0.0009654702291884605, 'learning_rate': 0.981312123475006, 'epoch': 0.12}
+ 61/520 [04:00, 3.72s/it] {'loss': 2.248, 'grad_norm': 0.0011793382934082958, 'learning_rate': 0.9804586609725499, 'epoch': 0.12}
+ 62/520 [04:04, 3.70s/it] {'loss': 1.7985, 'grad_norm': 0.001256725335312132, 'learning_rate': 0.9795865306634939, 'epoch': 0.12}
+ 63/520 [04:08, 3.74s/it] {'loss': 1.8538, 'grad_norm': 0.001031574702199794, 'learning_rate': 0.978695766433709, 'epoch': 0.12}
+ 64/520 [04:11, 3.73s/it] {'loss': 1.8419, 'grad_norm': 0.0009790094960206949, 'learning_rate': 0.9777864028930704, 'epoch': 0.12}
+ 65/520 [04:15, 3.73s/it] {'loss': 1.8792, 'grad_norm': 0.0010745401332639044, 'learning_rate': 0.9768584753741134, 'epoch': 0.12}
+ 66/520 [04:19, 3.72s/it] {'loss': 1.8372, 'grad_norm': 0.0008825245472134194, 'learning_rate': 0.9759120199306612, 'epoch': 0.13}
+ 67/520 [04:22, 3.72s/it] {'loss': 1.6669, 'grad_norm': 0.001032081633922535, 'learning_rate': 0.9749470733364229, 'epoch': 0.13}
+ 68/520 [04:26, 3.72s/it] {'loss': 1.7039, 'grad_norm': 0.0010873856638657794, 'learning_rate': 0.9739636730835659, 'epoch': 0.13}
+ 69/520 [04:30, 3.72s/it] {'loss': 1.6642, 'grad_norm': 0.0010102958409795902, 'learning_rate': 0.972961857381258, 'epoch': 0.13}
+ 70/520 [04:34, 3.71s/it] {'loss': 1.7638, 'grad_norm': 0.001318048339150614, 'learning_rate': 0.9719416651541838, 'epoch': 0.13}
+ 71/520 [04:37, 3.74s/it] {'loss': 1.6518, 'grad_norm': 0.0012540032074982313, 'learning_rate': 0.9709031360410317, 'epoch': 0.14}
+ 72/520 [04:41, 3.74s/it] {'loss': 1.8153, 'grad_norm': 0.0008583406327146982, 'learning_rate': 0.9698463103929542, 'epoch': 0.14}
+ 73/520 [04:45, 3.75s/it] {'loss': 1.6079, 'grad_norm': 0.0009111222710261707, 'learning_rate': 0.9687712292719997, 'epoch': 0.14}
+ 74/520 [04:49, 3.76s/it] {'loss': 1.7463, 'grad_norm': 0.0009954429110491944, 'learning_rate': 0.967677934449517, 'epoch': 0.14}
+ 75/520 [04:52, 3.75s/it] {'loss': 1.6268, 'grad_norm': 0.0007621590787850518, 'learning_rate': 0.9665664684045332, 'epoch': 0.14}
+ 76/520 [04:56, 3.76s/it] {'loss': 2.1735, 'grad_norm': 0.0012052754542324318, 'learning_rate': 0.9654368743221021, 'epoch': 0.15}
+ 77/520 [05:00, 3.75s/it] {'loss': 1.5751, 'grad_norm': 0.0011061139127537018, 'learning_rate': 0.9642891960916268, 'epoch': 0.15}
+ 78/520 [05:04, 3.76s/it] {'loss': 1.7085, 'grad_norm': 0.0008586620035787012, 'learning_rate': 0.9631234783051543, 'epoch': 0.15}
+ 79/520 [05:08, 3.78s/it] {'loss': 1.6774, 'grad_norm': 0.0007365653388477774, 'learning_rate': 0.9619397662556434, 'epoch': 0.15}
+ 80/520 [05:11, 3.76s/it] {'loss': 2.2068, 'grad_norm': 0.001433378141478937, 'learning_rate': 0.9607381059352038, 'epoch': 0.15}
+ 81/520 [05:15, 3.79s/it] {'loss': 1.9166, 'grad_norm': 0.00135011027840717, 'learning_rate': 0.9595185440333103, 'epoch': 0.16}
+ 82/520 [05:19, 3.81s/it] {'loss': 1.78, 'grad_norm': 0.0008029238222548775, 'learning_rate': 0.9582811279349881, 'epoch': 0.16}
+ 83/520 [05:23, 3.80s/it] {'loss': 1.8113, 'grad_norm': 0.0011811755346565526, 'learning_rate': 0.9570259057189716, 'epoch': 0.16}
+ 84/520 [05:26, 3.78s/it] {'loss': 1.7659, 'grad_norm': 0.0008150157461860591, 'learning_rate': 0.9557529261558366, 'epoch': 0.16}
+ 85/520 [05:30, 3.77s/it] {'loss': 1.7796, 'grad_norm': 0.0007456538531709484, 'learning_rate': 0.9544622387061055, 'epoch': 0.16}
+ 86/520 [05:34, 3.76s/it] {'loss': 1.8772, 'grad_norm': 0.0007989628358673624, 'learning_rate': 0.953153893518325, 'epoch': 0.17}
+ 87/520 [05:38, 3.76s/it] {'loss': 2.1492, 'grad_norm': 0.0008895245180303605, 'learning_rate': 0.9518279414271184, 'epoch': 0.17}
+ 88/520 [05:41, 3.76s/it] {'loss': 2.3402, 'grad_norm': 0.0010378107461544836, 'learning_rate': 0.9504844339512095, 'epoch': 0.17}
+ 89/520 [05:45, 3.76s/it] {'loss': 1.739, 'grad_norm': 0.0007961776022503064, 'learning_rate': 0.9491234232914221, 'epoch': 0.17}
+ 90/520 [05:49, 3.74s/it] {'loss': 1.6703, 'grad_norm': 0.0007406751725474719, 'learning_rate': 0.9477449623286505, 'epoch': 0.17}
+ 91/520 [05:53, 3.72s/it] {'loss': 1.7754, 'grad_norm': 0.0006663510660330346, 'learning_rate': 0.9463491046218058, 'epoch': 0.17}
+ 92/520 [05:56, 3.72s/it] {'loss': 1.7044, 'grad_norm': 0.000802606413245924, 'learning_rate': 0.9449359044057344, 'epoch': 0.18}
+ 93/520 [06:00, 3.71s/it] {'loss': 1.6707, 'grad_norm': 0.0008731847478086223, 'learning_rate': 0.9435054165891108, 'epoch': 0.18}
+ 94/520 [06:04, 3.72s/it] {'loss': 1.8265, 'grad_norm': 0.0007420008763326764, 'learning_rate': 0.9420576967523049, 'epoch': 0.18}
+ 95/520 [06:08, 3.73s/it] {'loss': 1.669, 'grad_norm': 0.0008037890158683557, 'learning_rate': 0.9405928011452211, 'epoch': 0.18}
+ 96/520 [06:11, 3.73s/it] {'loss': 1.6812, 'grad_norm': 0.0008099772232987448, 'learning_rate': 0.9391107866851143, 'epoch': 0.18}
+ 97/520 [06:15, 3.74s/it] {'loss': 1.6533, 'grad_norm': 0.0007463296021541333, 'learning_rate': 0.9376117109543769, 'epoch': 0.19}
+ 98/520 [06:19, 3.75s/it] {'loss': 1.6556, 'grad_norm': 0.0006154179075818861, 'learning_rate': 0.9360956321983027, 'epoch': 0.19}
+ 99/520 [06:23, 3.74s/it] {'loss': 1.6707, 'grad_norm': 0.0006790439838924538, 'learning_rate': 0.9345626093228232, 'epoch': 0.19}
+ 100/520 [06:26, 3.73s/it] {'loss': 1.9469, 'grad_norm': 0.001004738286772696, 'learning_rate': 0.9330127018922194, 'epoch': 0.19}
+ 101/520 [06:30, 3.71s/it] {'loss': 1.6776, 'grad_norm': 0.0010787475332278778, 'learning_rate': 0.9314459701268065, 'epoch': 0.19}
+ 102/520 [06:34, 3.70s/it] {'loss': 1.6503, 'grad_norm': 0.0007387797738723926, 'learning_rate': 0.9298624749005951, 'epoch': 0.2}
+ 103/520 [06:37, 3.70s/it] {'loss': 1.6285, 'grad_norm': 0.000770875610541487, 'learning_rate': 0.9282622777389258, 'epoch': 0.2}
+ 104/520 [06:41, 3.77s/it] {'loss': 1.6887, 'grad_norm': 0.0007498528228450669, 'learning_rate': 0.9266454408160778, 'epoch': 0.2}
+ 105/520 [06:45, 3.83s/it] {'loss': 1.6748, 'grad_norm': 0.00069829351339502, 'learning_rate': 0.9250120269528546, 'epoch': 0.2}
+ 106/520 [06:49, 3.86s/it] {'loss': 1.9552, 'grad_norm': 0.0011687707400096787, 'learning_rate': 0.9233620996141421, 'epoch': 0.2}
+ 107/520 [06:53, 3.81s/it] {'loss': 1.9643, 'grad_norm': 0.001236890819238412, 'learning_rate': 0.9216957229064429, 'epoch': 0.21}
+ 108/520 [06:57, 3.80s/it] {'loss': 1.6232, 'grad_norm': 0.0007795448090650323, 'learning_rate': 0.9200129615753858, 'epoch': 0.21}
+ 109/520 [07:00, 3.77s/it] {'loss': 1.9403, 'grad_norm': 0.0007896514565393632, 'learning_rate': 0.9183138810032099, 'epoch': 0.21}
+ 110/520 [07:04, 3.75s/it] {'loss': 1.8364, 'grad_norm': 0.0006364427733701742, 'learning_rate': 0.9165985472062245, 'epoch': 0.21}
+ 111/520 [07:08, 3.75s/it] {'loss': 1.8409, 'grad_norm': 0.0006702889593964414, 'learning_rate': 0.9148670268322437, 'epoch': 0.21}
+ 112/520 [07:12, 3.79s/it] {'loss': 1.7217, 'grad_norm': 0.0007340184301262216, 'learning_rate': 0.9131193871579975, 'epoch': 0.22}
+ 113/520 [07:15, 3.82s/it] {'loss': 1.5718, 'grad_norm': 0.0007713692136038189, 'learning_rate': 0.9113556960865167, 'epoch': 0.22}
+ 114/520 [07:19, 3.86s/it] {'loss': 1.698, 'grad_norm': 0.0006362909200742036, 'learning_rate': 0.909576022144496, 'epoch': 0.22}
+ 115/520 [07:23, 3.86s/it] {'loss': 1.8325, 'grad_norm': 0.000622389994064237, 'learning_rate': 0.9077804344796301, 'epoch': 0.22}
+ 116/520 [07:27, 3.89s/it] {'loss': 1.7969, 'grad_norm': 0.0006313040160343008, 'learning_rate': 0.9059690028579284, 'epoch': 0.22}
+ 117/520 [07:31, 3.88s/it] {'loss': 1.7839, 'grad_norm': 0.0006205959380648964, 'learning_rate': 0.9041417976610027, 'epoch': 0.23}
+ 118/520 [07:35, 3.91s/it] {'loss': 1.6482, 'grad_norm': 0.0005889948300792602, 'learning_rate': 0.9022988898833342, 'epoch': 0.23}
+ 119/520 [07:39, 3.91s/it] {'loss': 1.5761, 'grad_norm': 0.0006326673401956365, 'learning_rate': 0.900440351129514, 'epoch': 0.23}
+ 120/520 [07:43, 3.92s/it] {'loss': 1.6312, 'grad_norm': 0.0007839756830538104, 'learning_rate': 0.8985662536114613, 'epoch': 0.23}
+ 121/520 [07:47, 3.91s/it] {'loss': 1.6826, 'grad_norm': 0.0006100752430426187, 'learning_rate': 0.8966766701456176, 'epoch': 0.23}
+ 122/520 [07:51, 3.92s/it] {'loss': 1.5364, 'grad_norm': 0.0005572875616017637, 'learning_rate': 0.8947716741501177, 'epoch': 0.23}
+ 123/520 [07:55, 3.90s/it] {'loss': 2.036, 'grad_norm': 0.0007176744591505988, 'learning_rate': 0.8928513396419369, 'epoch': 0.24}
+ 124/520 [07:59, 3.91s/it] {'loss': 1.6545, 'grad_norm': 0.0006242735023538432, 'learning_rate': 0.890915741234015, 'epoch': 0.24}
+ 125/520 [08:02, 3.90s/it] {'loss': 1.6152, 'grad_norm': 0.0006284331593827658, 'learning_rate': 0.8889649541323574, 'epoch': 0.24}
+ 126/520 [08:07, 4.11s/it] {'loss': 1.8679, 'grad_norm': 0.0007264636675480234, 'learning_rate': 0.8869990541331138, 'epoch': 0.24}
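Note: the epoch column advancing by roughly 1/520 per optimizer step (0.24 by step 126) squares with the launch configuration: 8 ranks x per-device batch 4 x gradient accumulation 4 is a global batch of 128, and a 10% slice of the ~665K-sample mixture divided by 128 lands on 520 steps per epoch. A quick check of that arithmetic (the 665,298 sample count and the int(len * ratio) subsetting are assumptions, not read from this log):

import math

ranks, per_device_batch, grad_accum = 8, 4, 4
global_batch = ranks * per_device_batch * grad_accum  # 128 samples per optimizer step
subset = int(665_298 * 0.1)                           # assumed size of the 10% training slice
steps_per_epoch = math.ceil(subset / global_batch)    # 520, matching the N/520 counter
print(global_batch, steps_per_epoch)                  # -> 128 520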
+ 127/520 [08:11, 4.04s/it] {'loss': 1.5993, 'grad_norm': 0.000751692387029252, 'learning_rate': 0.8850181176196315, 'epoch': 0.24}
+ 128/520 [08:15, 4.00s/it] {'loss': 1.6724, 'grad_norm': 0.0005862183580805009, 'learning_rate': 0.883022221559489, 'epoch': 0.25}
+ 129/520 [08:19, 3.97s/it] {'loss': 1.5833, 'grad_norm': 0.0006295238039434366, 'learning_rate': 0.8810114435015054, 'epoch': 0.25}
+ 130/520 [08:23, 3.95s/it] {'loss': 1.6619, 'grad_norm': 0.0005397681807713415, 'learning_rate': 0.8789858615727265, 'epoch': 0.25}
+ 131/520 [08:27, 3.94s/it] {'loss': 1.8606, 'grad_norm': 0.000601323524944539, 'learning_rate': 0.8769455544753899, 'epoch': 0.25}
+ 132/520 [08:30, 3.93s/it] {'loss': 1.6999, 'grad_norm': 0.0006385422988980309, 'learning_rate': 0.8748906014838671, 'epoch': 0.25}
+ 133/520 [08:34, 3.94s/it] {'loss': 1.5992, 'grad_norm': 0.000675857933996527, 'learning_rate': 0.8728210824415827, 'epoch': 0.26}
+ 134/520 [08:38, 3.93s/it] {'loss': 1.6928, 'grad_norm': 0.0005827723548694978, 'learning_rate': 0.8707370777579133, 'epoch': 0.26}
+ 135/520 [08:42, 3.91s/it] {'loss': 1.777, 'grad_norm': 0.0005491167444029124, 'learning_rate': 0.868638668405062, 'epoch': 0.26}
+ 136/520 [08:46, 3.91s/it] {'loss': 1.6824, 'grad_norm': 0.0005416703094086081, 'learning_rate': 0.8665259359149131, 'epoch': 0.26}
+ 137/520 [08:50, 3.90s/it] {'loss': 1.6195, 'grad_norm': 0.0007675960586596457, 'learning_rate': 0.8643989623758642, 'epoch': 0.26}
+ 138/520 [08:54, 3.88s/it] {'loss': 1.6055, 'grad_norm': 0.0006013864562001815, 'learning_rate': 0.8622578304296363, 'epoch': 0.27}
+ 139/520 [08:58, 3.89s/it] {'loss': 1.7477, 'grad_norm': 0.0006672591495756671, 'learning_rate': 0.8601026232680633, 'epoch': 0.27}
+ 140/520 [09:02, 3.88s/it] {'loss': 1.9032, 'grad_norm': 0.0007108180949327545, 'learning_rate': 0.8579334246298592, 'epoch': 0.27}
+ 141/520 [09:05, 3.88s/it] {'loss': 1.7349, 'grad_norm': 0.00048794697022358887, 'learning_rate': 0.8557503187973651, 'epoch': 0.27}
+ 142/520 [09:09, 3.89s/it] {'loss': 1.9491, 'grad_norm': 0.0006181419327025272, 'learning_rate': 0.8535533905932737, 'epoch': 0.27}
+ 143/520 [09:13, 3.90s/it] {'loss': 1.6561, 'grad_norm': 0.0006092702256816371, 'learning_rate': 0.8513427253773346, 'epoch': 0.28}
+ 144/520 [09:17, 3.89s/it] {'loss': 1.5785, 'grad_norm': 0.00065476525242557, 'learning_rate': 0.8491184090430364, 'epoch': 0.28}
+ 145/520 [09:21, 3.90s/it] {'loss': 1.5189, 'grad_norm': 0.0005053364622706218, 'learning_rate': 0.8468805280142708, 'epoch': 0.28}
+ 146/520 [09:25, 3.90s/it] {'loss': 1.9891, 'grad_norm': 0.0006354363920744905, 'learning_rate': 0.8446291692419735, 'epoch': 0.28}
+ 147/520 [09:29, 3.89s/it] {'loss': 1.5469, 'grad_norm': 0.0005648524763815097, 'learning_rate': 0.8423644202007468, 'epoch': 0.28}
+ 148/520 [09:33, 3.89s/it] {'loss': 1.607, 'grad_norm': 0.0005237449372553632, 'learning_rate': 0.8400863688854596, 'epoch': 0.28}
+ 149/520 [09:37, 3.91s/it] {'loss': 1.5578, 'grad_norm': 0.0006198392337027378, 'learning_rate': 0.8377951038078302, 'epoch': 0.29}
+ 150/520 [09:41, 3.90s/it] {'loss': 1.7813, 'grad_norm': 0.0005360212361657272, 'learning_rate': 0.835490713992985, 'epoch': 0.29}
+ 151/520 [09:44, 3.90s/it] {'loss': 1.5868, 'grad_norm': 0.0006211538488690823, 'learning_rate': 0.833173288976002, 'epoch': 0.29}
+ 152/520 [09:48, 3.84s/it] {'loss': 1.562, 'grad_norm': 0.0005970958630333343, 'learning_rate': 0.8308429187984298, 'epoch': 0.29}
+ 153/520 [09:52, 3.79s/it] {'loss': 1.5847, 'grad_norm': 0.0005050392427723051, 'learning_rate': 0.8284996940047903, 'epoch': 0.29}
+ 154/520 [09:56, 3.77s/it] {'loss': 1.7011, 'grad_norm': 0.0005321075216188595, 'learning_rate': 0.8261437056390606, 'epoch': 0.3}
+ 155/520 [09:59, 3.77s/it] {'loss': 1.5575, 'grad_norm': 0.0005605529403724606, 'learning_rate': 0.8237750452411352, 'epoch': 0.3}
+ 156/520 [10:03, 3.77s/it] {'loss': 1.6316, 'grad_norm': 0.0007475044099928432, 'learning_rate': 0.8213938048432696, 'epoch': 0.3}
+ 157/520 [10:07, 3.76s/it] {'loss': 2.004, 'grad_norm': 0.0016219327402698205, 'learning_rate': 0.8190000769665043, 'epoch': 0.3}
+ 158/520 [10:11, 3.74s/it] {'loss': 1.5744, 'grad_norm': 0.0007220520621932644, 'learning_rate': 0.81659395461707, 'epoch': 0.3}
+ 159/520 [10:14, 3.73s/it] {'loss': 1.6064, 'grad_norm': 0.000481730127218201, 'learning_rate': 0.8141755312827736, 'epoch': 0.31}
+ 160/520 [10:18, 3.72s/it] {'loss': 1.6434, 'grad_norm': 0.0005974752716862106, 'learning_rate': 0.8117449009293668, 'epoch': 0.31}
+ 161/520 [10:22, 3.70s/it] {'loss': 1.6549, 'grad_norm': 0.000565091439361782, 'learning_rate': 0.8093021579968941, 'epoch': 0.31}
+ 162/520 [10:25, 3.70s/it] {'loss': 1.8901, 'grad_norm': 0.0007448465367322633, 'learning_rate': 0.8068473973960237, 'epoch': 0.31}
+ 163/520 [10:29, 3.71s/it] {'loss': 1.5243, 'grad_norm': 0.0006474225174079512, 'learning_rate': 0.8043807145043603, 'epoch': 0.31}
+ 164/520 [10:33, 3.69s/it] {'loss': 1.4665, 'grad_norm': 0.0005862581485860445, 'learning_rate': 0.8019022051627387, 'epoch': 0.32}
+ 165/520 [10:36, 3.70s/it] {'loss': 1.6036, 'grad_norm': 0.0006682647389921754, 'learning_rate': 0.7994119656715002, 'epoch': 0.32}
+ 166/520 [10:40, 3.70s/it] {'loss': 1.5949, 'grad_norm': 0.0005702209108324383, 'learning_rate': 0.7969100927867507, 'epoch': 0.32}
+ 167/520 [10:44, 3.70s/it] {'loss': 1.5859, 'grad_norm': 0.000538178727093184, 'learning_rate': 0.7943966837166023, 'epoch': 0.32}
+ 168/520 [10:48, 3.71s/it] {'loss': 1.5528, 'grad_norm': 0.0005082033725343603, 'learning_rate': 0.791871836117395, 'epoch': 0.32}
+ 169/520 [10:51, 3.74s/it] {'loss': 1.6077, 'grad_norm': 0.00044863177135288804, 'learning_rate': 0.789335648089903, 'epoch': 0.33}
+ 170/520 [10:55, 3.74s/it] {'loss': 1.7859, 'grad_norm': 0.0006282933996891067, 'learning_rate': 0.786788218175523, 'epoch': 0.33}
+ 171/520 [10:59, 3.74s/it] {'loss': 1.5421, 'grad_norm': 0.0005220402550600699, 'learning_rate': 0.7842296453524462, 'epoch': 0.33}
+ 172/520 [11:03, 3.72s/it] {'loss': 1.624, 'grad_norm': 0.000484635257151111, 'learning_rate': 0.781660029031811, 'epoch': 0.33}
+ 173/520 [11:06, 3.71s/it] {'loss': 1.5352, 'grad_norm': 0.0005353281474131645, 'learning_rate': 0.7790794690538421, 'epoch': 0.33}
+ 174/520 [11:10, 3.73s/it] {'loss': 1.6468, 'grad_norm': 0.000555452952717815, 'learning_rate': 0.7764880656839697, 'epoch': 0.33}
+ 175/520 [11:14, 3.73s/it] {'loss': 1.5362, 'grad_norm': 0.00047457783379995015, 'learning_rate': 0.7738859196089357, 'epoch': 0.34}
+ 176/520 [11:17, 3.73s/it] {'loss': 1.9141, 'grad_norm': 0.0005116157454462945, 'learning_rate': 0.7712731319328797, 'epoch': 0.34}
+ 177/520 [11:21, 3.73s/it] {'loss': 1.7572, 'grad_norm': 0.000491855139083959, 'learning_rate': 0.768649804173412, 'epoch': 0.34}
+ 178/520 [11:25, 3.72s/it] {'loss': 1.5809, 'grad_norm': 0.00047088345343561605, 'learning_rate': 0.7660160382576683, 'epoch': 0.34}
+ 179/520 [11:29, 3.71s/it] {'loss': 1.6973, 'grad_norm': 0.0005303138725540197, 'learning_rate': 0.7633719365183503, 'epoch': 0.34}
+ 180/520 [11:32, 3.71s/it] {'loss': 1.5829, 'grad_norm': 0.00046041119567633, 'learning_rate': 0.760717601689749, 'epoch': 0.35}
+ 181/520 [11:36, 3.73s/it] {'loss': 1.5796, 'grad_norm': 0.000521606856632562, 'learning_rate': 0.7580531369037533, 'epoch': 0.35}
+ 182/520 [11:40, 3.72s/it] {'loss': 1.6111, 'grad_norm': 0.0005035976458759542, 'learning_rate': 0.7553786456858429, 'epoch': 0.35}
+ 183/520 [11:43, 3.73s/it] {'loss': 1.6151, 'grad_norm': 0.0004817557131773946, 'learning_rate': 0.7526942319510654, 'epoch': 0.35}
+ 184/520 [11:47, 3.73s/it] {'loss': 1.5214, 'grad_norm': 0.000536104084568866, 'learning_rate': 0.75, 'epoch': 0.35}
+ 185/520 [11:51, 3.72s/it] {'loss': 1.7182, 'grad_norm': 0.0004914300454030791, 'learning_rate': 0.7472960545147037, 'epoch': 0.36}
+ 186/520 [11:55, 3.71s/it] {'loss': 1.5416, 'grad_norm': 0.0005404372175657519, 'learning_rate': 0.7445825005546447, 'epoch': 0.36}
+ 187/520 [11:58, 3.72s/it] {'loss': 1.5605, 'grad_norm': 0.0007452363534676705, 'learning_rate': 0.7418594435526199, 'epoch': 0.36}
+ 188/520 [12:02, 3.71s/it] {'loss': 1.6273, 'grad_norm': 0.0004923939346655271, 'learning_rate': 0.7391269893106591, 'epoch': 0.36}
+ 189/520 [12:06, 3.73s/it] {'loss': 1.6589, 'grad_norm': 0.000540721725996242, 'learning_rate': 0.7363852439959135, 'epoch': 0.36}
+ 190/520 [12:10, 3.73s/it] {'loss': 1.5459, 'grad_norm': 0.0005203988415530705, 'learning_rate': 0.733634314136531, 'epoch': 0.37}
+ 191/520 [12:13, 3.73s/it] {'loss': 1.5138, 'grad_norm': 0.0005031270345594956, 'learning_rate': 0.7308743066175171, 'epoch': 0.37}
+ 192/520 [12:17, 3.74s/it] {'loss': 1.6231, 'grad_norm': 0.0004989073995952794, 'learning_rate': 0.7281053286765815, 'epoch': 0.37}
+ 193/520 [12:21, 3.73s/it] {'loss': 1.8601, 'grad_norm': 0.0005717786221574275, 'learning_rate': 0.7253274878999727, 'epoch': 0.37}
+ 194/520 [12:24, 3.72s/it] {'loss': 1.7174, 'grad_norm': 0.0005898804241862771, 'learning_rate': 0.7225408922182961, 'epoch': 0.37}
+ 195/520 [12:28, 3.73s/it] {'loss': 1.5961, 'grad_norm': 0.0005511514689947441, 'learning_rate': 0.7197456499023225, 'epoch': 0.38}
+ 196/520 [12:32, 3.71s/it] {'loss': 1.584, 'grad_norm': 0.0005937522383308091, 'learning_rate': 0.716941869558779, 'epoch': 0.38}
+ 197/520 [12:36, 3.72s/it] {'loss': 1.5381, 'grad_norm': 0.0005577096228827492, 'learning_rate': 0.7141296601261313, 'epoch': 0.38}
+ 198/520 [12:39, 3.71s/it] {'loss': 1.6597, 'grad_norm': 0.0007391018952744275, 'learning_rate': 0.7113091308703497, 'epoch': 0.38}
+ 199/520 [12:43, 3.72s/it] {'loss': 1.5416, 'grad_norm': 0.0006092370676253778, 'learning_rate': 0.7084803913806641, 'epoch': 0.38}
+ 200/520 [12:47, 3.72s/it] {'loss': 1.7634, 'grad_norm': 0.0005862347980910922, 'learning_rate': 0.7056435515653059, 'epoch': 0.38}
+ 201/520 [12:51, 3.73s/it] {'loss': 1.7535, 'grad_norm': 0.0007519168536061073, 'learning_rate': 0.7027987216472376, 'epoch': 0.39}
+ 202/520 [12:54, 3.74s/it] {'loss': 1.5356, 'grad_norm': 0.0006333670156981917, 'learning_rate': 0.6999460121598704, 'epoch': 0.39}
+ 203/520 [12:58, 3.74s/it] {'loss': 1.5873, 'grad_norm': 0.0006124880911352812, 'learning_rate': 0.6970855339427697, 'epoch': 0.39}
+ 204/520 [13:02, 3.74s/it] {'loss': 1.6528, 'grad_norm': 0.0005428592005612056, 'learning_rate': 0.6942173981373474, 'epoch': 0.39}
+ 205/520 [13:06, 3.75s/it] {'loss': 1.8114, 'grad_norm': 0.0005821111253368882, 'learning_rate': 0.6913417161825449, 'epoch': 0.39}
+ 206/520 [13:09, 3.75s/it] {'loss': 1.6704, 'grad_norm': 0.0005401146936046055, 'learning_rate': 0.6884585998105026, 'epoch': 0.4}
+ 207/520 [13:13, 3.75s/it] {'loss': 1.7787, 'grad_norm': 0.0007476906687074329, 'learning_rate': 0.685568161042219, 'epoch': 0.4}
+ 208/520 [13:17, 3.76s/it] {'loss': 1.6119, 'grad_norm': 0.0005553159107211286, 'learning_rate': 0.6826705121831976, 'epoch': 0.4}
+ 209/520 [13:21, 3.74s/it] {'loss': 1.5627, 'grad_norm': 0.0005335176271237183, 'learning_rate': 0.6797657658190838, 'epoch': 0.4}
+ 210/520 [13:24, 3.75s/it] {'loss': 1.5917, 'grad_norm': 0.0004723683350052555, 'learning_rate': 0.6768540348112907, 'epoch': 0.4}
+ 211/520 [13:28, 3.74s/it] {'loss': 1.6444, 'grad_norm': 0.0005437041858324275, 'learning_rate': 0.6739354322926135, 'epoch': 0.41}
+ 212/520 [13:32, 3.72s/it] {'loss': 1.5984, 'grad_norm': 0.000602861287408374, 'learning_rate': 0.6710100716628344, 'epoch': 0.41}
+ 213/520 [13:35, 3.71s/it] {'loss': 1.5754, 'grad_norm': 0.0005138817536812006, 'learning_rate': 0.6680780665843155, 'epoch': 0.41}
+ 214/520 [13:39, 3.71s/it] {'loss': 1.622, 'grad_norm': 0.0005003958477106078, 'learning_rate': 0.6651395309775836, 'epoch': 0.41}
+ 215/520 [13:43, 3.71s/it] {'loss': 1.6949, 'grad_norm': 0.0005232903836933718, 'learning_rate': 0.6621945790169036, 'epoch': 0.41}
+ 216/520 [13:47, 3.73s/it] {'loss': 1.4504, 'grad_norm': 0.0004907041729452226, 'learning_rate': 0.6592433251258423, 'epoch': 0.42}
+ 217/520 [13:50, 3.75s/it] {'loss': 1.6123, 'grad_norm': 0.0004685264859523205, 'learning_rate': 0.6562858839728223, 'epoch': 0.42}
+ 218/520 [13:54, 3.75s/it] {'loss': 1.6203, 'grad_norm': 0.0005099759223502029, 'learning_rate': 0.6533223704666672, 'epoch': 0.42}
+ 219/520 [13:58, 3.74s/it] {'loss': 1.5539, 'grad_norm': 0.0003842321011708347, 'learning_rate': 0.6503528997521365, 'epoch': 0.42}
+ 220/520 [14:02, 3.73s/it] {'loss': 1.7328, 'grad_norm': 0.0007973845870388285, 'learning_rate': 0.6473775872054521, 'epoch': 0.42}
+ 221/520 [14:05, 3.73s/it] {'loss': 1.5962, 'grad_norm': 0.0004672118858619244, 'learning_rate': 0.644396548429815, 'epoch': 0.42}
+ 222/520 [14:09, 3.72s/it] {'loss': 1.4803, 'grad_norm': 0.00044189311732305823, 'learning_rate': 0.6414098992509137, 'epoch': 0.43}
+ 223/520 [14:13, 3.71s/it] {'loss': 1.4914, 'grad_norm': 0.00042872166483786544, 'learning_rate': 0.6384177557124247, 'epoch': 0.43}
+ 224/520 [14:16, 3.71s/it] {'loss': 2.1701, 'grad_norm': 0.000525595988005482, 'learning_rate': 0.6354202340715026, 'epoch': 0.43}
+ 225/520 [14:20, 3.70s/it] {'loss': 1.5029, 'grad_norm': 0.0004613889857418217, 'learning_rate': 0.6324174507942636, 'epoch': 0.43}
+ 226/520 [14:24, 3.70s/it] {'loss': 1.6115, 'grad_norm': 0.00041901338690531766, 'learning_rate': 0.6294095225512604, 'epoch': 0.43}
+ 227/520 [14:27, 3.71s/it] {'loss': 1.5969, 'grad_norm': 0.0004732135620955438, 'learning_rate': 0.6263965662129487, 'epoch': 0.44}
+ 228/520 [14:31, 3.71s/it] {'loss': 1.9264, 'grad_norm': 0.0005347615161629423, 'learning_rate': 0.6233786988451467, 'epoch': 0.44}
+ 229/520 [14:35, 3.70s/it] {'loss': 1.607, 'grad_norm': 0.0006029248041587073, 'learning_rate': 0.6203560377044866, 'epoch': 0.44}
+ 230/520 [14:39, 3.70s/it] {'loss': 1.4455, 'grad_norm': 0.0004976460319079737, 'learning_rate': 0.6173287002338577, 'epoch': 0.44}
+ 231/520 [14:42, 3.69s/it] {'loss': 1.544, 'grad_norm': 0.00047381914131054014, 'learning_rate': 0.6142968040578448, 'epoch': 0.44}
+ 232/520 [14:46, 3.70s/it] {'loss': 1.9586, 'grad_norm': 0.0005735883238263106, 'learning_rate': 0.6112604669781572, 'epoch': 0.45}
+ 233/520 [14:50, 3.70s/it] {'loss': 1.8015, 'grad_norm': 0.0006494309099351063, 'learning_rate': 0.6082198069690514, 'epoch': 0.45}
+ 234/520 [14:53, 3.71s/it] {'loss': 1.5, 'grad_norm': 0.0004835515936685573, 'learning_rate': 0.6051749421727479, 'epoch': 0.45}
+ 235/520 [14:57, 3.72s/it] {'loss': 1.5371, 'grad_norm': 0.0004486304360520204, 'learning_rate': 0.6021259908948402, 'epoch': 0.45}
+ 236/520 [15:01, 3.70s/it] {'loss': 1.6858, 'grad_norm': 0.00047110900157161577, 'learning_rate': 0.5990730715996988, 'epoch': 0.45}
+ 237/520 [15:04, 3.70s/it] {'loss': 1.6198, 'grad_norm': 0.0004314557221340732, 'learning_rate': 0.5960163029058682, 'epoch': 0.46}
+ 238/520 [15:08, 3.70s/it] {'loss': 1.5686, 'grad_norm': 0.0004906765217750099, 'learning_rate': 0.5929558035814574, 'epoch': 0.46}
+ 239/520 [15:12, 3.70s/it] {'loss': 1.6918, 'grad_norm': 0.000459298919855349, 'learning_rate': 0.5898916925395263, 'epoch': 0.46}
+ 240/520 [15:16, 3.72s/it] {'loss': 1.3911, 'grad_norm': 0.00040199827005744373, 'learning_rate': 0.5868240888334653, 'epoch': 0.46}
+ 241/520 [15:19, 3.73s/it] {'loss': 1.4781, 'grad_norm': 0.0004088263509681721, 'learning_rate': 0.5837531116523682, 'epoch': 0.46}
+ 242/520 [15:23, 3.74s/it] {'loss': 1.5419, 'grad_norm': 0.0004819241187666902, 'learning_rate': 0.5806788803164034, 'epoch': 0.47}
+ 243/520 [15:27, 3.74s/it] {'loss': 1.4987, 'grad_norm': 0.00041665198188016504, 'learning_rate': 0.5776015142721758, 'epoch': 0.47}
+ 244/520 [15:31, 3.73s/it] {'loss': 1.6828, 'grad_norm': 0.00047764774730982144, 'learning_rate': 0.5745211330880872, 'epoch': 0.47}
+ 245/520 [15:34, 3.73s/it] {'loss': 1.5217, 'grad_norm': 0.0005614250236181231, 'learning_rate': 0.57143785644969, 'epoch': 0.47}
+ 246/520 [15:38, 3.73s/it] {'loss': 1.939, 'grad_norm': 0.0005499354447883977, 'learning_rate': 0.5683518041550367, 'epoch': 0.47}
+ 247/520 [15:42, 3.73s/it] {'loss': 1.691, 'grad_norm': 0.00043564781063712036, 'learning_rate': 0.5652630961100259, 'epoch': 0.47}
+ 248/520 [15:46, 3.73s/it] {'loss': 1.4941, 'grad_norm': 0.0004681683931731805, 'learning_rate': 0.5621718523237427, 'epoch': 0.48}
+ 249/520 [15:49, 3.73s/it] {'loss': 1.6277, 'grad_norm': 0.0004361860574903311, 'learning_rate': 0.5590781929037965, 'epoch': 0.48}
+ 250/520 [15:53, 3.75s/it] {'loss': 1.5826, 'grad_norm': 0.0004950819781606992, 'learning_rate': 0.5559822380516539, 'epoch': 0.48}
+ 251/520 [15:57, 3.75s/it] {'loss': 1.6232, 'grad_norm': 0.00041850944526781444, 'learning_rate': 0.552884108057969, 'epoch': 0.48}
+ 252/520 [16:01, 3.75s/it] {'loss': 1.7832, 'grad_norm': 0.00048603780785307776, 'learning_rate': 0.5497839232979084, 'epoch': 0.48}
+ 253/520 [16:04, 3.75s/it] {'loss': 1.6189, 'grad_norm': 0.00048173803590507573, 'learning_rate': 0.5466818042264753, 'epoch': 0.49}
+ 254/520 [16:08, 3.75s/it] {'loss': 1.5079, 'grad_norm': 0.00043642636355828006, 'learning_rate': 0.5435778713738292, 'epoch': 0.49}
+ 255/520 [16:12, 3.75s/it] {'loss': 1.5531, 'grad_norm': 0.0005271805671093861, 'learning_rate': 0.5404722453406017, 'epoch': 0.49}
+ 256/520 [16:16, 3.78s/it] {'loss': 1.5891, 'grad_norm': 0.0004625662232551937, 'learning_rate': 0.5373650467932122, 'epoch': 0.49}
+ 257/520 [16:20, 3.83s/it] {'loss': 1.6172, 'grad_norm': 0.0004568385777546382, 'learning_rate': 0.5342563964591783, 'epoch': 0.49}
+ 258/520 [16:24, 3.86s/it] {'loss': 1.6259, 'grad_norm': 0.00042567861501904527, 'learning_rate': 0.5311464151224261, 'epoch': 0.5}
+ 259/520 [16:27, 3.89s/it] {'loss': 1.6784, 'grad_norm': 0.0004863677680298302, 'learning_rate': 0.5280352236185959, 'epoch': 0.5}
+ 260/520 [16:31, 3.90s/it] {'loss': 1.9009, 'grad_norm': 0.00044837877469064414, 'learning_rate': 0.5249229428303486, 'epoch': 0.5}
+ 261/520 [16:35, 3.92s/it] {'loss': 1.8165, 'grad_norm': 0.0004913228925515452, 'learning_rate': 0.521809693682668, 'epoch': 0.5}
+ 262/520 [16:39, 3.92s/it] {'loss': 1.5142, 'grad_norm': 0.00042671642844416827, 'learning_rate': 0.5186955971381629, 'epoch': 0.5}
+ 263/520 [16:43, 3.93s/it] {'loss': 1.8404, 'grad_norm': 0.0005154300856506511, 'learning_rate': 0.5155807741923666, 'epoch': 0.51}
+ 264/520 [16:47, 3.94s/it] {'loss': 1.6506, 'grad_norm': 0.00043986620676123066, 'learning_rate': 0.5124653458690365, 'epoch': 0.51}
+ 265/520 [16:51, 3.94s/it] {'loss': 1.5273, 'grad_norm': 0.0004593971299386882, 'learning_rate': 0.5093494332154511, 'epoch': 0.51}
+ 266/520 [16:55, 3.96s/it] {'loss': 1.3647, 'grad_norm': 0.0003843579912925494, 'learning_rate': 0.5062331572977076, 'epoch': 0.51}
+ 267/520 [16:59, 3.96s/it] {'loss': 1.5374, 'grad_norm': 0.00045746128403236554, 'learning_rate': 0.5031166391960168, 'epoch': 0.51}
+ 268/520 [17:03, 3.97s/it] {'loss': 1.9817, 'grad_norm': 0.0005516811165081328, 'learning_rate': 0.5, 'epoch': 0.52}
+ 269/520 [17:07, 3.96s/it] {'loss': 1.6336, 'grad_norm': 0.0004576968564871662, 'learning_rate': 0.4968833608039832, 'epoch': 0.52}
+ 270/520 [17:11, 3.96s/it] {'loss': 1.7263, 'grad_norm': 0.0004866672350539501, 'learning_rate': 0.4937668427022925, 'epoch': 0.52}
+ 271/520 [17:15, 3.96s/it] {'loss': 1.6545, 'grad_norm': 0.0004643371466726272, 'learning_rate': 0.490650566784549, 'epoch': 0.52}
+ 272/520 [17:19, 3.97s/it] {'loss': 1.749, 'grad_norm': 0.000504697748295577, 'learning_rate': 0.48753465413096364, 'epoch': 0.52}
+ 273/520 [17:23, 3.98s/it] {'loss': 1.8599, 'grad_norm': 0.0005183305897820475, 'learning_rate': 0.4844192258076335, 'epoch': 0.53}
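Note: the learning_rate series runs linear warmup for the first 16 steps (0.0625, 0.125, ..., 1.0 at step 16, i.e. warmup_ratio 0.03 of 520 steps rounded up) and then follows a half-cosine decay, which is why it passes exactly through 0.75 at step 184 and 0.5 at step 268 above. A sketch reproducing the logged values under that reading (the closed form is the standard cosine-with-warmup shape; peak 1.0 is simply what this trainer logs):

import math

def lr_at(step, warmup=16, total=520, peak=1.0):
    # Linear warmup over the first 16 steps, then half-cosine decay to 0;
    # step 268 = 16 + (520 - 16) // 2 is the schedule's halfway point.
    if step <= warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

assert abs(lr_at(184) - 0.75) < 1e-9
assert abs(lr_at(268) - 0.5) < 1e-9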
+ 274/520 [17:27, 3.98s/it] {'loss': 1.5852, 'grad_norm': 0.0004901722373218649, 'learning_rate': 0.48130440286183723, 'epoch': 0.53}
+ 275/520 [17:31, 3.97s/it] {'loss': 1.5159, 'grad_norm': 0.000422406921774331, 'learning_rate': 0.47819030631733206, 'epoch': 0.53}
+ 276/520 [17:35, 3.98s/it] {'loss': 1.6155, 'grad_norm': 0.00047064726570943144, 'learning_rate': 0.4750770571696514, 'epoch': 0.53}
+ 277/520 [17:39, 3.97s/it] {'loss': 1.8561, 'grad_norm': 0.0004378000612465765, 'learning_rate': 0.47196477638140405, 'epoch': 0.53}
+ 278/520 [17:43, 3.95s/it] {'loss': 1.4645, 'grad_norm': 0.0004179503610790503, 'learning_rate': 0.46885358487757395, 'epoch': 0.53}
+ 279/520 [17:47, 3.95s/it] {'loss': 1.7758, 'grad_norm': 0.0005327140883759302, 'learning_rate': 0.46574360354082167, 'epoch': 0.54}
+ 280/520 [17:51, 3.95s/it] {'loss': 1.5204, 'grad_norm': 0.000526454668558338, 'learning_rate': 0.4626349532067879, 'epoch': 0.54}
+ 281/520 [17:55, 3.93s/it] {'loss': 1.6664, 'grad_norm': 0.00048285863736136027, 'learning_rate': 0.4595277546593983, 'epoch': 0.54}
+ 282/520 [17:58, 3.94s/it] {'loss': 1.4806, 'grad_norm': 0.00047707948177309287, 'learning_rate': 0.4564221286261709, 'epoch': 0.54}
+ 283/520 [18:02, 3.94s/it] {'loss': 1.7008, 'grad_norm': 0.0005199132085213848, 'learning_rate': 0.4533181957735247, 'epoch': 0.54}
+ 284/520 [18:06, 3.93s/it] {'loss': 1.7337, 'grad_norm': 0.0005899838688557355, 'learning_rate': 0.45021607670209174, 'epoch': 0.55}
+ 285/520 [18:10, 3.93s/it] {'loss': 1.5387, 'grad_norm': 0.0005322392910892536, 'learning_rate': 0.44711589194203116, 'epoch': 0.55}
+ 286/520 [18:14, 3.93s/it] {'loss': 1.3691, 'grad_norm': 0.000508718907635923, 'learning_rate': 0.4440177619483461, 'epoch': 0.55}
+ 287/520 [18:18, 3.92s/it] {'loss': 1.6557, 'grad_norm': 0.0004842484722145656, 'learning_rate': 0.4409218070962036, 'epoch': 0.55}
+ 288/520 [18:22, 3.93s/it] {'loss': 1.7406, 'grad_norm': 0.00039362560964070536, 'learning_rate': 0.43782814767625755, 'epoch': 0.55}
+ 289/520 [18:26, 3.93s/it] {'loss': 1.5411, 'grad_norm': 0.0004414092450099813, 'learning_rate': 0.4347369038899743, 'epoch': 0.56}
+ 290/520 [18:30, 3.92s/it] {'loss': 1.4882, 'grad_norm': 0.0004359943440329104, 'learning_rate': 0.4316481958449634, 'epoch': 0.56}
+ 291/520 [18:34, 3.92s/it] {'loss': 1.5058, 'grad_norm': 0.0004908074212030532, 'learning_rate': 0.4285621435503101, 'epoch': 0.56}
+ 292/520 [18:38, 3.92s/it] {'loss': 1.5838, 'grad_norm': 0.0004166688432365639, 'learning_rate': 0.4254788669119127, 'epoch': 0.56}
+ 293/520 [18:42, 3.93s/it] {'loss': 1.489, 'grad_norm': 0.000565497287622958, 'learning_rate': 0.4223984857278242, 'epoch': 0.56}
+ 294/520 [18:46, 3.93s/it] {'loss': 1.5352, 'grad_norm': 0.0005004984005298367, 'learning_rate': 0.41932111968359664, 'epoch': 0.57}
+ 295/520 [18:50, 3.94s/it] {'loss': 1.8397, 'grad_norm': 0.0005312546260207461, 'learning_rate': 0.41624688834763185, 'epoch': 0.57}
+ 296/520 [18:53, 3.93s/it] {'loss': 1.4906, 'grad_norm': 0.0004980615010452239, 'learning_rate': 0.41317591116653485, 'epoch': 0.57}
+ 297/520 [18:57, 3.94s/it] {'loss': 1.6646, 'grad_norm': 0.0004993114619004895, 'learning_rate': 0.41010830746047366, 'epoch': 0.57}
+ 298/520 [19:01, 3.93s/it] {'loss': 1.6026, 'grad_norm': 0.0004781150532020157, 'learning_rate': 0.4070441964185427, 'epoch': 0.57}
+ 299/520 [19:05, 3.94s/it] {'loss': 1.8269, 'grad_norm': 0.0005140179256187905, 'learning_rate': 0.40398369709413195, 'epoch': 0.57}
+ 300/520 [19:09, 3.95s/it] {'loss': 1.6596, 'grad_norm': 0.00041440923083298513, 'learning_rate': 0.4009269284003013, 'epoch': 0.58}
+ 301/520 [19:13, 3.95s/it] {'loss': 1.6369, 'grad_norm': 0.0004155029468725224, 'learning_rate': 0.3978740091051599, 'epoch': 0.58}
+ 302/520 [19:17, 3.95s/it] {'loss': 1.8522, 'grad_norm': 0.000476385900786002, 'learning_rate': 0.3948250578272522, 'epoch': 0.58}
+ 303/520 [19:21, 3.95s/it] {'loss': 1.5333, 'grad_norm': 0.0005684165184707498, 'learning_rate': 0.3917801930309486, 'epoch': 0.58}
+ 304/520 [19:25, 3.95s/it] {'loss': 1.7236, 'grad_norm': 0.0005211568465438711, 'learning_rate': 0.38873953302184283, 'epoch': 0.58}
+ 305/520 [19:29, 3.95s/it] {'loss': 1.7281, 'grad_norm': 0.0005439056724531125, 'learning_rate': 0.3857031959421553, 'epoch': 0.59}
+ 306/520 [19:33, 3.94s/it] {'loss': 1.5926, 'grad_norm': 0.0004431481692586455, 'learning_rate': 0.3826712997661425, 'epoch': 0.59}
+ 307/520 [19:37, 3.94s/it] {'loss': 1.5507, 'grad_norm': 0.0004169612010709885, 'learning_rate': 0.3796439622955136, 'epoch': 0.59}
+ 308/520 [19:41, 3.94s/it] {'loss': 1.6761, 'grad_norm': 0.00043315070917327185, 'learning_rate': 0.37662130115485315, 'epoch': 0.59}
+ 309/520 [19:46, 4.20s/it] {'loss': 1.5399, 'grad_norm': 0.000437724253338552, 'learning_rate': 0.3736034337870512, 'epoch': 0.59}
+ 310/520 [19:49, 4.05s/it] {'loss': 1.4985, 'grad_norm': 0.00045952618253554167, 'learning_rate': 0.3705904774487396, 'epoch': 0.6}
+ 311/520 [19:53, 3.97s/it] {'loss': 1.514, 'grad_norm': 0.0004282195931607099, 'learning_rate': 0.36758254920573635, 'epoch': 0.6}
+ 312/520 [19:57, 4.04s/it] {'loss': 1.4718, 'grad_norm': 0.00044967012276927646, 'learning_rate': 0.3645797659284975, 'epoch': 0.6}
+ 313/520 [20:01, 3.94s/it] {'loss': 1.4294, 'grad_norm': 0.0006727879234904045, 'learning_rate': 0.36158224428757535, 'epoch': 0.6}
+ 314/520 [20:05, 3.98s/it] {'loss': 1.4897, 'grad_norm': 0.0005374978716077656, 'learning_rate': 0.35859010074908626, 'epoch': 0.6}
+ 315/520 [20:09, 3.99s/it] {'loss': 1.8642, 'grad_norm': 0.0006590632644783634, 'learning_rate': 0.35560345157018514, 'epoch': 0.61}
+ 316/520 [20:13, 3.95s/it] {'loss': 1.4645, 'grad_norm': 0.0006497394579234459, 'learning_rate': 0.35262241279454787, 'epoch': 0.61}
+ 317/520 [20:17, 3.90s/it] {'loss': 1.4643, 'grad_norm': 0.00042316376348402074, 'learning_rate': 0.3496471002478635, 'epoch': 0.61}
+ 318/520 [20:20, 3.84s/it] {'loss': 1.6369, 'grad_norm': 0.000461204407292713, 'learning_rate': 0.3466776295333329, 'epoch': 0.61}
+ 319/520 [20:24, 3.88s/it] {'loss': 1.4772, 'grad_norm': 0.0004030872168665548, 'learning_rate': 0.34371411602717783, 'epoch': 0.61}
+ 320/520 [20:28, 3.83s/it] {'loss': 1.4044, 'grad_norm': 0.00041859363766594347, 'learning_rate': 0.34075667487415784, 'epoch': 0.62}
+ 321/520 [20:32, 3.91s/it] {'loss': 1.6545, 'grad_norm': 0.0004374175379671343, 'learning_rate': 0.3378054209830965, 'epoch': 0.62}
+ 322/520 [20:36, 3.86s/it] {'loss': 1.6631, 'grad_norm': 0.00045574511964952604, 'learning_rate': 0.3348604690224166, 'epoch': 0.62}
+ 323/520 [20:40, 3.81s/it] {'loss': 1.7815, 'grad_norm': 0.0004897250161903433, 'learning_rate': 0.3319219334156847, 'epoch': 0.62}
+ 324/520 [20:44, 3.84s/it] {'loss': 1.573, 'grad_norm': 0.00043041458509281833, 'learning_rate': 0.32898992833716567, 'epoch': 0.62}
+ 325/520 [20:47, 3.87s/it] {'loss': 1.5682, 'grad_norm': 0.0004510732320186248, 'learning_rate': 0.32606456770738634, 'epoch': 0.62}
+ 326/520 [20:51, 3.87s/it] {'loss': 1.5623, 'grad_norm': 0.0004235982510972701, 'learning_rate': 0.3231459651887093, 'epoch': 0.63}
+ 327/520 [20:55, 3.89s/it] {'loss': 1.8666, 'grad_norm': 0.0005461456409979719, 'learning_rate': 0.32023423418091623, 'epoch': 0.63}
+ 328/520 [20:59, 3.89s/it] {'loss': 1.6528, 'grad_norm': 0.00046436568566953916, 'learning_rate': 0.3173294878168025, 'epoch': 0.63}
+ 329/520 [21:03, 3.89s/it] {'loss': 1.4557, 'grad_norm': 0.00039487855157762953, 'learning_rate': 0.31443183895778104, 'epoch': 0.63}
+ 330/520 [21:07, 3.89s/it] {'loss': 1.5594, 'grad_norm': 0.0004107502123125596, 'learning_rate': 0.3115414001894974, 'epoch': 0.63}
+ 331/520 [21:11, 3.89s/it] {'loss': 1.5082, 'grad_norm': 0.00043672395821743484, 'learning_rate': 0.30865828381745514, 'epoch': 0.64}
+ 332/520 [21:15, 3.90s/it] {'loss': 1.8418, 'grad_norm': 0.00048561382565798477, 'learning_rate': 0.30578260186265266, 'epoch': 0.64}
+ 333/520 [21:19, 3.90s/it] {'loss': 1.7348, 'grad_norm': 0.00045788952050355563, 'learning_rate': 0.3029144660572304, 'epoch': 0.64}
+ 334/520 [21:23, 3.89s/it] {'loss': 1.5726, 'grad_norm': 0.0004335447648374433, 'learning_rate': 0.3000539878401296, 'epoch': 0.64}
+ 335/520 [21:26, 3.83s/it] {'loss': 1.5813, 'grad_norm': 0.0004334273987594702, 'learning_rate': 0.29720127835276255, 'epoch': 0.64}
+ 336/520 [21:30, 3.80s/it] {'loss': 1.4706, 'grad_norm': 0.0005136613220014179, 'learning_rate': 0.29435644843469433, 'epoch': 0.65}
+ 337/520 [21:34, 3.78s/it] {'loss': 1.444, 'grad_norm': 0.0004380519709420752, 'learning_rate': 0.2915196086193361, 'epoch': 0.65}
+ 338/520 [21:37, 3.77s/it] {'loss': 1.6065, 'grad_norm': 0.0004210586275646185, 'learning_rate': 0.28869086912965036, 'epoch': 0.65}
+ 339/520 [21:41, 3.76s/it] {'loss': 1.5356, 'grad_norm': 0.0004221738406250373, 'learning_rate': 0.28587033987386856, 'epoch': 0.65}
+ 340/520 [21:45, 3.75s/it] {'loss': 1.4928, 'grad_norm': 0.000410645006547253, 'learning_rate': 0.28305813044122097, 'epoch': 0.65}
+ 341/520 [21:49, 3.75s/it] {'loss': 1.5326, 'grad_norm': 0.00045398469417942314, 'learning_rate': 0.28025435009767746, 'epoch': 0.66}
+ 342/520 [21:52, 3.73s/it] {'loss': 1.8389, 'grad_norm': 0.0006581806848819495, 'learning_rate': 0.2774591077817038, 'epoch': 0.66}
+ 343/520 [21:56, 3.74s/it] {'loss': 1.8019, 'grad_norm': 0.0005008926268574842, 'learning_rate': 0.2746725121000273, 'epoch': 0.66}
+ 344/520 [22:00, 3.73s/it] {'loss': 1.4641, 'grad_norm': 0.0004036995265509746, 'learning_rate': 0.2718946713234185, 'epoch': 0.66}
+ 345/520 [22:04, 3.74s/it] {'loss': 1.6089, 'grad_norm': 0.0005031672956860124, 'learning_rate': 0.26912569338248316, 'epoch': 0.66}
+ 346/520 [22:07, 3.73s/it] {'loss': 1.7686, 'grad_norm': 0.0008262360539279564, 'learning_rate': 0.266365685863469, 'epoch': 0.67}
+ 347/520 [22:11, 3.72s/it] {'loss': 1.4872, 'grad_norm': 0.0003914522351364112, 'learning_rate': 0.26361475600408657, 'epoch': 0.67}
+ Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 348/520 [22:15, 3.72s/it] {'loss': 1.4849, 'grad_norm': 0.0005048633644818911, 'learning_rate': 0.26087301068934104, 'epoch': 0.67}
+ 349/520 [22:18, 3.71s/it] {'loss': 1.5323, 'grad_norm': 0.0004671402981892116, 'learning_rate': 0.2581405564473801, 'epoch': 0.67}
+ 350/520 [22:22, 3.71s/it] {'loss': 1.5375, 'grad_norm': 0.00040059499406503747, 'learning_rate': 0.2554174994453555, 'epoch': 0.67}
+ 351/520 [22:26, 3.72s/it] {'loss': 1.4163, 'grad_norm': 0.0003546581150077797, 'learning_rate': 0.2527039454852963, 'epoch': 0.68}
+ 352/520 [22:30, 3.75s/it] {'loss': 1.5855, 'grad_norm': 0.0003727850839008445, 'learning_rate': 0.2500000000000001, 'epoch': 0.68}
+ 353/520 [22:33, 3.76s/it] {'loss': 1.7367, 'grad_norm': 0.0004450371440691608, 'learning_rate': 0.24730576804893478, 'epoch': 0.68}
+ 354/520 [22:37, 3.75s/it] {'loss': 1.8453, 'grad_norm': 0.0005270399208290677, 'learning_rate': 0.24462135431415732, 'epoch': 0.68}
+ 355/520 [22:41, 3.74s/it] {'loss': 1.5143, 'grad_norm': 0.000412131617753156, 'learning_rate': 0.24194686309624663, 'epoch': 0.68}
+ 356/520 [22:45, 3.73s/it] {'loss': 1.5138, 'grad_norm': 0.00044524713570594644, 'learning_rate': 0.239282398310251, 'epoch': 0.68}
+ 357/520 [22:48, 3.72s/it] {'loss': 1.5055, 'grad_norm': 0.0003916533732864933, 'learning_rate': 0.2366280634816496, 'epoch': 0.69}
+ 358/520 [22:52, 3.71s/it] {'loss': 1.4718, 'grad_norm': 0.0004411101323987932, 'learning_rate': 0.23398396174233177, 'epoch': 0.69}
+ 359/520 [22:56, 3.75s/it] {'loss': 1.7843, 'grad_norm': 0.00048357796918194765, 'learning_rate': 0.231350195826588, 'epoch': 0.69}
+ 360/520 [23:00, 3.75s/it] {'loss': 1.8107, 'grad_norm': 0.0007017665866764769, 'learning_rate': 0.22872686806712034, 'epoch': 0.69}
+ 361/520 [23:03, 3.74s/it] {'loss': 1.8097, 'grad_norm': 0.00047779136989662107, 'learning_rate': 0.2261140803910644, 'epoch': 0.69}
+ 362/520 [23:07, 3.71s/it] {'loss': 1.4888, 'grad_norm': 0.0004749284786850491, 'learning_rate': 0.22351193431603028, 'epoch': 0.7}
70%|██████▉ | 362/520 [23:07<09:46, 3.71s/it] 70%|██████▉ | 363/520 [23:11<09:44, 3.72s/it] {'loss': 1.622, 'grad_norm': 0.0005072889117088999, 'learning_rate': 0.2209205309461581, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:11<09:44, 3.72s/it] 70%|███████ | 364/520 [23:15<09:44, 3.75s/it] {'loss': 1.8138, 'grad_norm': 0.0005093634945723353, 'learning_rate': 0.21833997096818897, 'epoch': 0.7} + 70%|███████ | 364/520 [23:15<09:44, 3.75s/it] 70%|███████ | 365/520 [23:18<09:41, 3.75s/it] {'loss': 1.6485, 'grad_norm': 0.00048055649870175506, 'learning_rate': 0.2157703546475539, 'epoch': 0.7} + 70%|███████ | 365/520 [23:18<09:41, 3.75s/it] 70%|███████ | 366/520 [23:22<09:36, 3.74s/it] {'loss': 1.5964, 'grad_norm': 0.0004078950589029202, 'learning_rate': 0.2132117818244771, 'epoch': 0.7} + 70%|███████ | 366/520 [23:22<09:36, 3.74s/it] 71%|███████ | 367/520 [23:26<09:31, 3.73s/it] {'loss': 1.5954, 'grad_norm': 0.00042117840686809843, 'learning_rate': 0.21066435191009714, 'epoch': 0.71} + 71%|███████ | 367/520 [23:26<09:31, 3.73s/it] 71%|███████ | 368/520 [23:29<09:27, 3.73s/it] {'loss': 1.4239, 'grad_norm': 0.000545246281549127, 'learning_rate': 0.2081281638826052, 'epoch': 0.71} + 71%|███████ | 368/520 [23:29<09:27, 3.73s/it] 71%|███████ | 369/520 [23:33<09:23, 3.73s/it] {'loss': 1.7623, 'grad_norm': 0.0005222405413922224, 'learning_rate': 0.20560331628339767, 'epoch': 0.71} + 71%|███████ | 369/520 [23:33<09:23, 3.73s/it] 71%|███████ | 370/520 [23:37<09:18, 3.72s/it] {'loss': 1.4925, 'grad_norm': 0.00045629470194074315, 'learning_rate': 0.20308990721324927, 'epoch': 0.71} + 71%|███████ | 370/520 [23:37<09:18, 3.72s/it] 71%|███████▏ | 371/520 [23:41<09:14, 3.72s/it] {'loss': 1.4557, 'grad_norm': 0.0005401227331867503, 'learning_rate': 0.20058803432849986, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:41<09:14, 3.72s/it] 72%|███████▏ | 372/520 [23:44<09:08, 3.70s/it] {'loss': 1.8865, 'grad_norm': 0.00042981999697541337, 'learning_rate': 0.1980977948372612, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:44<09:08, 3.70s/it] 72%|███████▏ | 373/520 [23:48<09:06, 3.72s/it] {'loss': 1.7483, 'grad_norm': 0.0005377588910061426, 'learning_rate': 0.19561928549563967, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:48<09:06, 3.72s/it] 72%|███████▏ | 374/520 [23:52<09:01, 3.71s/it] {'loss': 1.584, 'grad_norm': 0.00042127492149269705, 'learning_rate': 0.19315260260397638, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:52<09:01, 3.71s/it] 72%|███████▏ | 375/520 [23:55<08:57, 3.70s/it] {'loss': 1.4649, 'grad_norm': 0.0008469277851291973, 'learning_rate': 0.1906978420031059, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:55<08:57, 3.70s/it] 72%|███████▏ | 376/520 [23:59<08:54, 3.71s/it] {'loss': 1.6064, 'grad_norm': 0.0003904130326419176, 'learning_rate': 0.18825509907063326, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:59<08:54, 3.71s/it] 72%|███████▎ | 377/520 [24:03<08:50, 3.71s/it] {'loss': 1.547, 'grad_norm': 0.0005066258114728906, 'learning_rate': 0.18582446871722635, 'epoch': 0.72} + 72%|███████▎ | 377/520 [24:03<08:50, 3.71s/it] 73%|███████▎ | 378/520 [24:07<08:46, 3.70s/it] {'loss': 1.5997, 'grad_norm': 0.00043436257100180843, 'learning_rate': 0.18340604538293015, 'epoch': 0.73} + 73%|███████▎ | 378/520 [24:07<08:46, 3.70s/it] 73%|███████▎ | 379/520 [24:10<08:43, 3.71s/it] {'loss': 1.5981, 'grad_norm': 0.0005239645817519807, 'learning_rate': 0.18099992303349577, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:10<08:43, 3.71s/it] 73%|███████▎ | 380/520 [24:14<08:41, 3.73s/it] {'loss': 1.8738, 'grad_norm': 
0.00045218368538042477, 'learning_rate': 0.17860619515673032, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:14<08:41, 3.73s/it] 73%|███████▎ | 381/520 [24:18<08:47, 3.79s/it] {'loss': 1.5978, 'grad_norm': 0.0004580121259580763, 'learning_rate': 0.17622495475886485, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:18<08:47, 3.79s/it] 73%|███████▎ | 382/520 [24:22<08:44, 3.80s/it] {'loss': 1.7828, 'grad_norm': 0.0006096379151281771, 'learning_rate': 0.17385629436093958, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:22<08:44, 3.80s/it] 74%|███████▎ | 383/520 [24:26<08:43, 3.82s/it] {'loss': 1.4129, 'grad_norm': 0.0004734174190462551, 'learning_rate': 0.17150030599520982, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:26<08:43, 3.82s/it] 74%|███████▍ | 384/520 [24:29<08:39, 3.82s/it] {'loss': 2.0614, 'grad_norm': 0.0005381058481375493, 'learning_rate': 0.16915708120157041, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:29<08:39, 3.82s/it] 74%|███████▍ | 385/520 [24:33<08:36, 3.82s/it] {'loss': 1.5763, 'grad_norm': 0.0005540272695170718, 'learning_rate': 0.16682671102399804, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:33<08:36, 3.82s/it] 74%|███████▍ | 386/520 [24:37<08:34, 3.84s/it] {'loss': 1.4715, 'grad_norm': 0.0003591169987154777, 'learning_rate': 0.16450928600701503, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:37<08:34, 3.84s/it] 74%|███████▍ | 387/520 [24:41<08:29, 3.83s/it] {'loss': 1.9006, 'grad_norm': 0.0005018559578931965, 'learning_rate': 0.16220489619216988, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:41<08:29, 3.83s/it] 75%|███████▍ | 388/520 [24:45<08:22, 3.80s/it] {'loss': 1.4428, 'grad_norm': 0.00039104759625100007, 'learning_rate': 0.1599136311145402, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:45<08:22, 3.80s/it] 75%|███████▍ | 389/520 [24:48<08:16, 3.79s/it] {'loss': 1.5401, 'grad_norm': 0.0005364643369859289, 'learning_rate': 0.15763557979925324, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:48<08:16, 3.79s/it] 75%|███████▌ | 390/520 [24:52<08:09, 3.76s/it] {'loss': 1.5817, 'grad_norm': 0.0004634750298627036, 'learning_rate': 0.1553708307580265, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:52<08:09, 3.76s/it] 75%|███████▌ | 391/520 [24:56<08:06, 3.77s/it] {'loss': 1.6827, 'grad_norm': 0.0005165161455289193, 'learning_rate': 0.15311947198572917, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:56<08:06, 3.77s/it] 75%|███████▌ | 392/520 [25:00<08:00, 3.75s/it] {'loss': 1.4757, 'grad_norm': 0.0004223848147437417, 'learning_rate': 0.15088159095696363, 'epoch': 0.75} + 75%|███████▌ | 392/520 [25:00<08:00, 3.75s/it] 76%|███████▌ | 393/520 [25:03<07:53, 3.73s/it] {'loss': 1.6659, 'grad_norm': 0.0006855651147720302, 'learning_rate': 0.14865727462266543, 'epoch': 0.76} + 76%|███████▌ | 393/520 [25:03<07:53, 3.73s/it] 76%|███████▌ | 394/520 [25:07<07:52, 3.75s/it] {'loss': 1.5563, 'grad_norm': 0.0007433143202763595, 'learning_rate': 0.14644660940672627, 'epoch': 0.76} + 76%|███████▌ | 394/520 [25:07<07:52, 3.75s/it] 76%|███████▌ | 395/520 [25:11<07:45, 3.72s/it] {'loss': 1.5096, 'grad_norm': 0.0004382077870473453, 'learning_rate': 0.14424968120263504, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:11<07:45, 3.72s/it] 76%|███████▌ | 396/520 [25:14<07:40, 3.71s/it] {'loss': 1.5912, 'grad_norm': 0.00048182408758726616, 'learning_rate': 0.14206657537014078, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:14<07:40, 3.71s/it] 76%|███████▋ | 397/520 [25:18<07:37, 3.72s/it] {'loss': 1.5748, 'grad_norm': 0.0004267560459909925, 'learning_rate': 0.1398973767319368, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:18<07:37, 
3.72s/it] 77%|███████▋ | 398/520 [25:22<07:33, 3.72s/it] {'loss': 1.5418, 'grad_norm': 0.00046211095466263015, 'learning_rate': 0.13774216957036367, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:22<07:33, 3.72s/it] 77%|███████▋ | 399/520 [25:26<07:30, 3.72s/it] {'loss': 1.728, 'grad_norm': 0.000704172908441176, 'learning_rate': 0.13560103762413583, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:26<07:30, 3.72s/it] 77%|███████▋ | 400/520 [25:29<07:27, 3.73s/it] {'loss': 1.7695, 'grad_norm': 0.0006181864299267048, 'learning_rate': 0.13347406408508694, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:29<07:27, 3.73s/it] 77%|███████▋ | 401/520 [25:33<07:22, 3.72s/it] {'loss': 1.3421, 'grad_norm': 0.0006992496407535059, 'learning_rate': 0.131361331594938, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:33<07:22, 3.72s/it] 77%|███████▋ | 402/520 [25:37<07:18, 3.72s/it] {'loss': 1.4913, 'grad_norm': 0.0004347195021004406, 'learning_rate': 0.12926292224208663, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:37<07:18, 3.72s/it] 78%|███████▊ | 403/520 [25:41<07:14, 3.71s/it] {'loss': 1.5517, 'grad_norm': 0.0004645317287999832, 'learning_rate': 0.1271789175584172, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:41<07:14, 3.71s/it] 78%|███████▊ | 404/520 [25:44<07:11, 3.72s/it] {'loss': 1.4472, 'grad_norm': 0.0007638917505058792, 'learning_rate': 0.12510939851613284, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:44<07:11, 3.72s/it] 78%|███████▊ | 405/520 [25:48<07:09, 3.74s/it] {'loss': 1.7205, 'grad_norm': 0.0005879313835318634, 'learning_rate': 0.12305444552461009, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:48<07:09, 3.74s/it] 78%|███████▊ | 406/520 [25:52<07:04, 3.72s/it] {'loss': 1.6591, 'grad_norm': 0.0006505310572825915, 'learning_rate': 0.12101413842727343, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:52<07:04, 3.72s/it] 78%|███████▊ | 407/520 [25:55<06:59, 3.71s/it] {'loss': 1.6605, 'grad_norm': 0.00044448491281536494, 'learning_rate': 0.1189885564984946, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:55<06:59, 3.71s/it] 78%|███████▊ | 408/520 [25:59<06:55, 3.71s/it] {'loss': 1.5154, 'grad_norm': 0.0006813881392462472, 'learning_rate': 0.11697777844051105, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:59<06:55, 3.71s/it] 79%|███████▊ | 409/520 [26:03<06:50, 3.70s/it] {'loss': 1.6743, 'grad_norm': 0.0005702830164129398, 'learning_rate': 0.1149818823803686, 'epoch': 0.79} + 79%|███████▊ | 409/520 [26:03<06:50, 3.70s/it] 79%|███████▉ | 410/520 [26:07<06:47, 3.71s/it] {'loss': 1.3367, 'grad_norm': 0.0005415632803934128, 'learning_rate': 0.1130009458668863, 'epoch': 0.79} + 79%|███████▉ | 410/520 [26:07<06:47, 3.71s/it] 79%|███████▉ | 411/520 [26:10<06:45, 3.72s/it] {'loss': 1.6397, 'grad_norm': 0.00048373115616803927, 'learning_rate': 0.11103504586764262, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:10<06:45, 3.72s/it] 79%|███████▉ | 412/520 [26:14<06:42, 3.72s/it] {'loss': 1.5529, 'grad_norm': 0.00045121686880913325, 'learning_rate': 0.1090842587659851, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:14<06:42, 3.72s/it] 79%|███████▉ | 413/520 [26:18<06:39, 3.73s/it] {'loss': 1.8178, 'grad_norm': 0.0005511620677785673, 'learning_rate': 0.10714866035806325, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:18<06:39, 3.73s/it] 80%|███████▉ | 414/520 [26:22<06:38, 3.76s/it] {'loss': 1.5403, 'grad_norm': 0.0006355598229273949, 'learning_rate': 0.10522832584988234, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:22<06:38, 3.76s/it] 80%|███████▉ | 415/520 [26:26<06:40, 3.81s/it] {'loss': 1.4832, 'grad_norm': 0.00042912832481086194, 
'learning_rate': 0.10332332985438247, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:26<06:40, 3.81s/it] 80%|████████ | 416/520 [26:29<06:41, 3.86s/it] {'loss': 1.4169, 'grad_norm': 0.000538804001558597, 'learning_rate': 0.10143374638853891, 'epoch': 0.8} + 80%|████████ | 416/520 [26:29<06:41, 3.86s/it] 80%|████████ | 417/520 [26:33<06:39, 3.88s/it] {'loss': 1.6059, 'grad_norm': 0.00042295813567566127, 'learning_rate': 0.09955964887048607, 'epoch': 0.8} + 80%|████████ | 417/520 [26:33<06:39, 3.88s/it] 80%|████████ | 418/520 [26:37<06:37, 3.89s/it] {'loss': 1.6003, 'grad_norm': 0.0005606619709586614, 'learning_rate': 0.09770111011666582, 'epoch': 0.8} + 80%|████████ | 418/520 [26:37<06:37, 3.89s/it] 81%|████████ | 419/520 [26:41<06:33, 3.90s/it] {'loss': 1.5805, 'grad_norm': 0.0004539733804221156, 'learning_rate': 0.09585820233899739, 'epoch': 0.81} + 81%|████████ | 419/520 [26:41<06:33, 3.90s/it] 81%|████████ | 420/520 [26:45<06:30, 3.91s/it] {'loss': 1.4343, 'grad_norm': 0.0004600062387075173, 'learning_rate': 0.09403099714207175, 'epoch': 0.81} + 81%|████████ | 420/520 [26:45<06:30, 3.91s/it] 81%|████████ | 421/520 [26:49<06:27, 3.91s/it] {'loss': 1.3556, 'grad_norm': 0.0004521297218761724, 'learning_rate': 0.09221956552036992, 'epoch': 0.81} + 81%|████████ | 421/520 [26:49<06:27, 3.91s/it] 81%|████████ | 422/520 [26:53<06:22, 3.91s/it] {'loss': 1.5141, 'grad_norm': 0.0004828622710732376, 'learning_rate': 0.09042397785550405, 'epoch': 0.81} + 81%|████████ | 422/520 [26:53<06:22, 3.91s/it] 81%|████████▏ | 423/520 [26:57<06:19, 3.91s/it] {'loss': 1.534, 'grad_norm': 0.0004746774638261814, 'learning_rate': 0.08864430391348332, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:57<06:19, 3.91s/it] 82%|████████▏ | 424/520 [27:01<06:16, 3.92s/it] {'loss': 1.88, 'grad_norm': 0.0006373483083909292, 'learning_rate': 0.08688061284200266, 'epoch': 0.82} + 82%|████████▏ | 424/520 [27:01<06:16, 3.92s/it] 82%|████████▏ | 425/520 [27:05<06:12, 3.92s/it] {'loss': 1.4881, 'grad_norm': 0.000436711306935282, 'learning_rate': 0.08513297316775625, 'epoch': 0.82} + 82%|████████▏ | 425/520 [27:05<06:12, 3.92s/it] 82%|████████▏ | 426/520 [27:09<06:08, 3.92s/it] {'loss': 1.5864, 'grad_norm': 0.0006657665318727333, 'learning_rate': 0.08340145279377559, 'epoch': 0.82} + 82%|████████▏ | 426/520 [27:09<06:08, 3.92s/it] 82%|████████▏ | 427/520 [27:13<06:04, 3.91s/it] {'loss': 1.4181, 'grad_norm': 0.00043275165326350995, 'learning_rate': 0.08168611899679012, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:13<06:04, 3.91s/it] 82%|████████▏ | 428/520 [27:17<06:01, 3.93s/it] {'loss': 1.3983, 'grad_norm': 0.0004738117155507402, 'learning_rate': 0.0799870384246143, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:17<06:01, 3.93s/it] 82%|████████▎ | 429/520 [27:20<05:57, 3.92s/it] {'loss': 1.5415, 'grad_norm': 0.0004479377113407508, 'learning_rate': 0.07830427709355725, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:20<05:57, 3.92s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:25<05:56, 3.96s/it] {'loss': 1.506, 'grad_norm': 0.00045938155012728853, 'learning_rate': 0.07663790038585794, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:25<05:56, 3.96s/it] 83%|████████▎ | 431/520 [27:28<05:51, 3.95s/it] {'loss': 1.7708, 'grad_norm': 0.0007121443474165354, 'learning_rate': 0.07498797304714544, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:28<05:51, 3.95s/it] 83%|████████▎ | 432/520 [27:32<05:47, 3.94s/it] {'loss': 1.4157, 'grad_norm': 0.00046959594109029164, 'learning_rate': 0.0733545591839222, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:32<05:47, 3.94s/it] 83%|████████▎ | 433/520 [27:36<05:42, 3.94s/it] {'loss': 1.578, 'grad_norm': 0.00046985833276268343, 'learning_rate': 0.07173772226107433, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:36<05:42, 3.94s/it] 83%|████████▎ | 434/520 [27:40<05:39, 3.95s/it] {'loss': 1.2924, 'grad_norm': 0.0004514296793585504, 'learning_rate': 0.07013752509940485, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:40<05:39, 3.95s/it] 84%|████████▎ | 435/520 [27:44<05:37, 3.97s/it] {'loss': 1.6429, 'grad_norm': 0.0004921368202429094, 'learning_rate': 0.06855402987319348, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:44<05:37, 3.97s/it] 84%|████████▍ | 436/520 [27:48<05:32, 3.96s/it] {'loss': 1.3922, 'grad_norm': 0.00048824038616608444, 'learning_rate': 0.06698729810778065, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:48<05:32, 3.96s/it] 84%|████████▍ | 437/520 [27:52<05:26, 3.94s/it] {'loss': 1.6765, 'grad_norm': 0.0005261085122586724, 'learning_rate': 0.0654373906771768, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:52<05:26, 3.94s/it] 84%|████████▍ | 438/520 [27:56<05:23, 3.95s/it] {'loss': 1.4004, 'grad_norm': 0.00046567599662442554, 'learning_rate': 0.06390436780169734, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:56<05:23, 3.95s/it] 84%|████████▍ | 439/520 [28:00<05:19, 3.94s/it] {'loss': 1.6897, 'grad_norm': 0.0007088820075968284, 'learning_rate': 0.06238828904562316, 'epoch': 0.84} + 84%|████████▍ | 439/520 [28:00<05:19, 3.94s/it] 85%|████████▍ | 440/520 [28:04<05:14, 3.93s/it] {'loss': 1.507, 'grad_norm': 0.00043783964804115436, 'learning_rate': 0.06088921331488567, 'epoch': 0.85} + 85%|████████▍ | 440/520 [28:04<05:14, 3.93s/it] 85%|████████▍ | 441/520 [28:08<05:11, 3.94s/it] {'loss': 1.7397, 'grad_norm': 0.0005131898905013396, 'learning_rate': 0.0594071988547788, 'epoch': 0.85} + 85%|████████▍ | 441/520 [28:08<05:11, 3.94s/it] 85%|████████▌ | 442/520 [28:12<05:04, 3.90s/it] {'loss': 1.5467, 'grad_norm': 0.0004491637382933115, 'learning_rate': 0.05794230324769517, 'epoch': 0.85} + 85%|████████▌ | 442/520 [28:12<05:04, 3.90s/it] 85%|████████▌ | 443/520 [28:15<04:55, 3.84s/it] {'loss': 1.5636, 'grad_norm': 0.0005268061408643419, 'learning_rate': 0.05649458341088914, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:15<04:55, 3.84s/it] 85%|████████▌ | 444/520 [28:19<04:50, 3.82s/it] {'loss': 1.5278, 'grad_norm': 0.0005547888954048068, 'learning_rate': 0.05506409559426573, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:19<04:50, 3.82s/it] 86%|████████▌ | 445/520 [28:23<04:43, 3.79s/it] {'loss': 1.4321, 'grad_norm': 0.00048040975908392105, 'learning_rate': 0.05365089537819434, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:23<04:43, 3.79s/it] 86%|████████▌ | 446/520 [28:27<04:39, 3.78s/it] {'loss': 1.8237, 'grad_norm': 0.0005079060024803795, 'learning_rate': 0.052255037671349536, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:27<04:39, 3.78s/it] 
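The "Token indices sequence length is longer than the specified maximum sequence length" warnings interleaved above (2778 > 2048 earlier, 2076 > 2048 here) come from the tokenizer, not the model: a raw sample encodes to more ids than the --model_max_length of 2048 passed on the launch command. A minimal sketch of the condition, assuming a stock Qwen2.5 tokenizer and a hypothetical over-long sample:

```python
from transformers import AutoTokenizer

# Hypothetical stand-in for an over-long conversation from the training mix.
sample = "word " * 3000

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", model_max_length=2048)
ids = tok(sample)["input_ids"]                       # len(ids) > 2048 -> emits the warning above
clipped = tok(sample, truncation=True)["input_ids"]  # explicit truncation clips to model_max_length
assert len(clipped) <= 2048
```

The warning is harmless only if the preprocessing or collator truncates (or drops) such samples before the forward pass; otherwise the "indexing errors" it mentions would be real.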
86%|████████▌ | 447/520 [28:30<04:34, 3.76s/it] {'loss': 1.5478, 'grad_norm': 0.0005400434870954087, 'learning_rate': 0.05087657670857798, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:30<04:34, 3.76s/it] 86%|████████▌ | 448/520 [28:34<04:29, 3.74s/it] {'loss': 1.5053, 'grad_norm': 0.0009488900655647913, 'learning_rate': 0.04951556604879048, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:34<04:29, 3.74s/it] 86%|████████▋ | 449/520 [28:38<04:28, 3.78s/it] {'loss': 1.7958, 'grad_norm': 0.0006036740292887873, 'learning_rate': 0.04817205857288176, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:38<04:28, 3.78s/it] 87%|████████▋ | 450/520 [28:42<04:27, 3.83s/it] {'loss': 1.5881, 'grad_norm': 0.0004628053417285441, 'learning_rate': 0.04684610648167503, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:42<04:27, 3.83s/it] 87%|████████▋ | 451/520 [28:46<04:26, 3.86s/it] {'loss': 1.593, 'grad_norm': 0.0005929919064777302, 'learning_rate': 0.04553776129389453, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:46<04:26, 3.86s/it] 87%|████████▋ | 452/520 [28:50<04:24, 3.88s/it] {'loss': 1.8364, 'grad_norm': 0.0006823794362896242, 'learning_rate': 0.04424707384416343, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:50<04:24, 3.88s/it] 87%|████████▋ | 453/520 [28:54<04:20, 3.89s/it] {'loss': 1.7893, 'grad_norm': 0.0004666592373854512, 'learning_rate': 0.042974094281028496, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:54<04:20, 3.89s/it] 87%|████████▋ | 454/520 [28:58<04:18, 3.91s/it] {'loss': 1.4452, 'grad_norm': 0.0004561239848430551, 'learning_rate': 0.0417188720650119, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:58<04:18, 3.91s/it] 88%|████████▊ | 455/520 [29:02<04:14, 3.92s/it] {'loss': 1.6157, 'grad_norm': 0.0004406563254364648, 'learning_rate': 0.04048145596668967, 'epoch': 0.88} + 88%|████████▊ | 455/520 [29:02<04:14, 3.92s/it] 88%|████████▊ | 456/520 [29:05<04:12, 3.94s/it] {'loss': 1.504, 'grad_norm': 0.0005846512166849632, 'learning_rate': 0.03926189406479613, 'epoch': 0.88} + 88%|████████▊ | 456/520 [29:06<04:12, 3.94s/it] 88%|████████▊ | 457/520 [29:09<04:05, 3.90s/it] {'loss': 1.9172, 'grad_norm': 0.0009340077155070514, 'learning_rate': 0.03806023374435663, 'epoch': 0.88} + 88%|████████▊ | 457/520 [29:09<04:05, 3.90s/it] 88%|████████▊ | 458/520 [29:13<03:59, 3.86s/it] {'loss': 1.719, 'grad_norm': 0.0005065458279415136, 'learning_rate': 0.036876521694845676, 'epoch': 0.88} + 88%|████████▊ | 458/520 [29:13<03:59, 3.86s/it] 88%|████████▊ | 459/520 [29:17<03:53, 3.83s/it] {'loss': 1.5853, 'grad_norm': 0.0004047271099507115, 'learning_rate': 0.03571080390837322, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:17<03:53, 3.83s/it] 88%|████████▊ | 460/520 [29:21<03:47, 3.79s/it] {'loss': 1.4325, 'grad_norm': 0.0005483463523896824, 'learning_rate': 0.03456312567789793, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:21<03:47, 3.79s/it] 89%|████████▊ | 461/520 [29:24<03:42, 3.78s/it] {'loss': 1.9814, 'grad_norm': 0.0005656472610601136, 'learning_rate': 0.03343353159546675, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:24<03:42, 3.78s/it] 89%|████████▉ | 462/520 [29:28<03:37, 3.75s/it] {'loss': 1.8792, 'grad_norm': 0.0004729510789342936, 'learning_rate': 0.032322065550483, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:28<03:37, 3.75s/it] 89%|████████▉ | 463/520 [29:32<03:33, 3.74s/it] {'loss': 1.4115, 'grad_norm': 0.0004811915225566984, 'learning_rate': 0.031228770728000455, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:32<03:33, 3.74s/it] 89%|████████▉ | 464/520 [29:35<03:29, 3.73s/it] {'loss': 1.6122, 'grad_norm': 
0.0005141187700298625, 'learning_rate': 0.03015368960704584, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:35<03:29, 3.73s/it] 89%|████████▉ | 465/520 [29:39<03:25, 3.73s/it] {'loss': 1.7267, 'grad_norm': 0.0005393056338893999, 'learning_rate': 0.029096863958968266, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:39<03:25, 3.73s/it] 90%|████████▉ | 466/520 [29:43<03:20, 3.72s/it] {'loss': 1.5791, 'grad_norm': 0.0003758417644595213, 'learning_rate': 0.028058334845816213, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:43<03:20, 3.72s/it] 90%|████████▉ | 467/520 [29:47<03:17, 3.73s/it] {'loss': 1.7153, 'grad_norm': 0.00048245131459627075, 'learning_rate': 0.02703814261874199, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:47<03:17, 3.73s/it] 90%|█████████ | 468/520 [29:50<03:13, 3.72s/it] {'loss': 1.5626, 'grad_norm': 0.0005529577138232305, 'learning_rate': 0.02603632691643415, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:50<03:13, 3.72s/it] 90%|█████████ | 469/520 [29:54<03:10, 3.73s/it] {'loss': 1.6303, 'grad_norm': 0.0004246287537202354, 'learning_rate': 0.025052926663577002, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:54<03:10, 3.73s/it] 90%|█████████ | 470/520 [29:58<03:06, 3.73s/it] {'loss': 1.4641, 'grad_norm': 0.0004232606160836343, 'learning_rate': 0.02408798006933882, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:58<03:06, 3.73s/it] 91%|█████████ | 471/520 [30:01<03:01, 3.71s/it] {'loss': 1.5335, 'grad_norm': 0.00048679811613626584, 'learning_rate': 0.02314152462588659, 'epoch': 0.91} + 91%|█████████ | 471/520 [30:01<03:01, 3.71s/it] 91%|█████████ | 472/520 [30:05<02:59, 3.74s/it] {'loss': 1.4588, 'grad_norm': 0.0004346417224214258, 'learning_rate': 0.022213597106929606, 'epoch': 0.91} + 91%|█████████ | 472/520 [30:05<02:59, 3.74s/it] 91%|█████████ | 473/520 [30:09<02:58, 3.80s/it] {'loss': 1.5356, 'grad_norm': 0.0005394771953707064, 'learning_rate': 0.021304233566290964, 'epoch': 0.91} + 91%|█████████ | 473/520 [30:09<02:58, 3.80s/it] 91%|█████████ | 474/520 [30:13<02:56, 3.83s/it] {'loss': 1.7768, 'grad_norm': 0.0004267857727364517, 'learning_rate': 0.020413469336506118, 'epoch': 0.91} + 91%|█████████ | 474/520 [30:13<02:56, 3.83s/it] 91%|█████████▏| 475/520 [30:17<02:53, 3.86s/it] {'loss': 1.6551, 'grad_norm': 0.00045706269118173993, 'learning_rate': 0.019541339027450255, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:17<02:53, 3.86s/it] 92%|█████████▏| 476/520 [30:21<02:49, 3.86s/it] {'loss': 1.5119, 'grad_norm': 0.0004441481520898817, 'learning_rate': 0.018687876524993985, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:21<02:49, 3.86s/it] 92%|█████████▏| 477/520 [30:25<02:47, 3.90s/it] {'loss': 1.4987, 'grad_norm': 0.0004777906790706577, 'learning_rate': 0.01785311498968617, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:25<02:47, 3.90s/it] 92%|█████████▏| 478/520 [30:29<02:44, 3.91s/it] {'loss': 1.4472, 'grad_norm': 0.00043497863186989563, 'learning_rate': 0.0170370868554659, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:29<02:44, 3.91s/it] 92%|█████████▏| 479/520 [30:33<02:40, 3.91s/it] {'loss': 1.7729, 'grad_norm': 0.0005428848652995354, 'learning_rate': 0.016239823828401945, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:33<02:40, 3.91s/it] 92%|█████████▏| 480/520 [30:37<02:36, 3.91s/it] {'loss': 1.7545, 'grad_norm': 0.0005233305816384243, 'learning_rate': 0.015461356885461075, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:37<02:36, 3.91s/it] 92%|█████████▎| 481/520 [30:41<02:33, 3.92s/it] {'loss': 1.7943, 'grad_norm': 0.0005702795717604074, 'learning_rate': 0.014701716273304521, 'epoch': 0.93} + 
92%|█████████▎| 481/520 [30:41<02:33, 3.92s/it] 93%|█████████▎| 482/520 [30:44<02:28, 3.91s/it] {'loss': 1.8102, 'grad_norm': 0.0006592119898431698, 'learning_rate': 0.01396093150711275, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:44<02:28, 3.91s/it] 93%|█████████▎| 483/520 [30:48<02:24, 3.92s/it] {'loss': 1.5681, 'grad_norm': 0.0006613817968711309, 'learning_rate': 0.013239031369438325, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:48<02:24, 3.92s/it] 93%|█████████▎| 484/520 [30:52<02:20, 3.90s/it] {'loss': 1.547, 'grad_norm': 0.00042010855623348547, 'learning_rate': 0.01253604390908819, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:52<02:20, 3.90s/it] 93%|█████████▎| 485/520 [30:56<02:16, 3.91s/it] {'loss': 1.4936, 'grad_norm': 0.00043527928226371387, 'learning_rate': 0.011851996440033319, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:56<02:16, 3.91s/it] 93%|█████████▎| 486/520 [31:00<02:12, 3.90s/it] {'loss': 1.6238, 'grad_norm': 0.00043294380915055166, 'learning_rate': 0.01118691554034773, 'epoch': 0.93} + 93%|█████████▎| 486/520 [31:00<02:12, 3.90s/it] 94%|█████████▎| 487/520 [31:04<02:08, 3.90s/it] {'loss': 1.4428, 'grad_norm': 0.0007836572825159628, 'learning_rate': 0.010540827051175816, 'epoch': 0.94} + 94%|█████████▎| 487/520 [31:04<02:08, 3.90s/it] 94%|█████████▍| 488/520 [31:08<02:03, 3.86s/it] {'loss': 1.3938, 'grad_norm': 0.0006066272671075199, 'learning_rate': 0.009913756075728086, 'epoch': 0.94} + 94%|█████████▍| 488/520 [31:08<02:03, 3.86s/it] 94%|█████████▍| 489/520 [31:11<01:58, 3.81s/it] {'loss': 1.7431, 'grad_norm': 0.00039679215953800355, 'learning_rate': 0.009305726978306172, 'epoch': 0.94} + 94%|█████████▍| 489/520 [31:11<01:58, 3.81s/it] 94%|█████████▍| 490/520 [31:15<01:53, 3.79s/it] {'loss': 1.5456, 'grad_norm': 0.0005253721550561103, 'learning_rate': 0.008716763383355863, 'epoch': 0.94} + 94%|█████████▍| 490/520 [31:15<01:53, 3.79s/it] 94%|█████████▍| 491/520 [31:19<01:49, 3.79s/it] {'loss': 1.4768, 'grad_norm': 0.00046290248367009996, 'learning_rate': 0.008146888174549338, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:19<01:49, 3.79s/it] 95%|█████████▍| 492/520 [31:23<01:45, 3.77s/it] {'loss': 1.6403, 'grad_norm': 0.0005982451245995766, 'learning_rate': 0.00759612349389599, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:23<01:45, 3.77s/it] 95%|█████████▍| 493/520 [31:26<01:41, 3.77s/it] {'loss': 1.891, 'grad_norm': 0.0005173658468093494, 'learning_rate': 0.007064490740882057, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:26<01:41, 3.77s/it] 95%|█████████▌| 494/520 [31:30<01:38, 3.77s/it] {'loss': 1.549, 'grad_norm': 0.00039770928991267454, 'learning_rate': 0.006552010571639455, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:30<01:38, 3.77s/it] 95%|█████████▌| 495/520 [31:34<01:34, 3.76s/it] {'loss': 1.5071, 'grad_norm': 0.0004406759049927666, 'learning_rate': 0.006058702898142643, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:34<01:34, 3.76s/it] 95%|█████████▌| 496/520 [31:38<01:30, 3.79s/it] {'loss': 1.4428, 'grad_norm': 0.000457053363345206, 'learning_rate': 0.0055845868874357385, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:38<01:30, 3.79s/it] 96%|█████████▌| 497/520 [31:42<01:27, 3.79s/it] {'loss': 1.6949, 'grad_norm': 0.00048436730082932475, 'learning_rate': 0.005129680960887006, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:42<01:27, 3.79s/it] 96%|█████████▌| 498/520 [31:45<01:23, 3.77s/it] {'loss': 1.4925, 'grad_norm': 0.0007139716019569513, 'learning_rate': 0.004694002793473595, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:45<01:23, 3.77s/it] 96%|█████████▌| 
499/520 [31:49<01:20, 3.83s/it] {'loss': 1.8504, 'grad_norm': 0.0004612697914861037, 'learning_rate': 0.004277569313094809, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:49<01:20, 3.83s/it] 96%|█████████▌| 500/520 [31:53<01:17, 3.88s/it] {'loss': 1.6584, 'grad_norm': 0.0005377928474059385, 'learning_rate': 0.0038803966999139683, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:53<01:17, 3.88s/it] 96%|█████████▋| 501/520 [31:57<01:14, 3.90s/it] {'loss': 1.7535, 'grad_norm': 0.001068165599225353, 'learning_rate': 0.0035025003857301895, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:57<01:14, 3.90s/it] 97%|█████████▋| 502/520 [32:01<01:10, 3.91s/it] {'loss': 1.5347, 'grad_norm': 0.0004201634138478261, 'learning_rate': 0.003143895053378698, 'epoch': 0.97} + 97%|█████████▋| 502/520 [32:01<01:10, 3.91s/it] 97%|█████████▋| 503/520 [32:05<01:06, 3.92s/it] {'loss': 1.7211, 'grad_norm': 0.0009338869007746853, 'learning_rate': 0.002804594636160118, 'epoch': 0.97} + 97%|█████████▋| 503/520 [32:05<01:06, 3.92s/it] 97%|█████████▋| 504/520 [32:09<01:02, 3.92s/it] {'loss': 1.547, 'grad_norm': 0.0005370151807055481, 'learning_rate': 0.002484612317299295, 'epoch': 0.97} + 97%|█████████▋| 504/520 [32:09<01:02, 3.92s/it] 97%|█████████▋| 505/520 [32:13<00:58, 3.92s/it] {'loss': 1.616, 'grad_norm': 0.0004537715789952699, 'learning_rate': 0.0021839605294330933, 'epoch': 0.97} + 97%|█████████▋| 505/520 [32:13<00:58, 3.92s/it] 97%|█████████▋| 506/520 [32:17<00:55, 3.93s/it] {'loss': 1.4796, 'grad_norm': 0.0007535436814973614, 'learning_rate': 0.0019026509541272274, 'epoch': 0.97} + 97%|█████████▋| 506/520 [32:17<00:55, 3.93s/it] 98%|█████████▊| 507/520 [32:21<00:51, 3.93s/it] {'loss': 1.9117, 'grad_norm': 0.0004399479891778768, 'learning_rate': 0.0016406945214224589, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:21<00:51, 3.93s/it] 98%|█████████▊| 508/520 [32:25<00:47, 3.93s/it] {'loss': 1.6301, 'grad_norm': 0.0005289266664138879, 'learning_rate': 0.0013981014094099353, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:25<00:47, 3.93s/it] 98%|█████████▊| 509/520 [32:29<00:43, 3.93s/it] {'loss': 1.5774, 'grad_norm': 0.0004146882299047533, 'learning_rate': 0.0011748810438355628, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:29<00:43, 3.93s/it] 98%|█████████▊| 510/520 [32:33<00:39, 3.92s/it] {'loss': 1.5409, 'grad_norm': 0.000459142505403501, 'learning_rate': 0.0009710420977340761, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:33<00:39, 3.92s/it] 98%|█████████▊| 511/520 [32:36<00:35, 3.91s/it] {'loss': 1.4878, 'grad_norm': 0.00046728568957161157, 'learning_rate': 0.0007865924910916977, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:36<00:35, 3.91s/it] 98%|█████████▊| 512/520 [32:40<00:31, 3.94s/it] {'loss': 1.3598, 'grad_norm': 0.00048241814702239673, 'learning_rate': 0.0006215393905388278, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:40<00:31, 3.94s/it] 99%|█████████▊| 513/520 [32:44<00:27, 3.95s/it] {'loss': 1.6066, 'grad_norm': 0.0005028338888628264, 'learning_rate': 0.0004758892090711009, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:44<00:27, 3.95s/it] 99%|█████████▉| 514/520 [32:48<00:23, 3.95s/it] {'loss': 1.5862, 'grad_norm': 0.0004434141420199844, 'learning_rate': 0.00034964760580069587, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:48<00:23, 3.95s/it] 99%|█████████▉| 515/520 [32:52<00:19, 3.93s/it] {'loss': 1.6657, 'grad_norm': 0.0005456726629916032, 'learning_rate': 0.00024281948573617873, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:52<00:19, 3.93s/it] 99%|█████████▉| 516/520 [32:56<00:15, 3.93s/it] {'loss': 1.5025, 
'grad_norm': 0.0004544605814082912, 'learning_rate': 0.00015540899959187726, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:56<00:15, 3.93s/it] 99%|█████████▉| 517/520 [33:00<00:11, 3.91s/it] {'loss': 1.8085, 'grad_norm': 0.0004846281143528497, 'learning_rate': 8.741954362678772e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [33:00<00:11, 3.91s/it] 100%|█████████▉| 518/520 [33:04<00:07, 3.89s/it] {'loss': 1.5633, 'grad_norm': 0.0005125946428851589, 'learning_rate': 3.885375951256931e-05, 'epoch': 1.0} + 100%|█████████▉| 518/520 [33:04<00:07, 3.89s/it] 100%|█████████▉| 519/520 [33:08<00:03, 3.89s/it] {'loss': 1.782, 'grad_norm': 0.0007270560721732754, 'learning_rate': 9.713534230904042e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [33:08<00:03, 3.89s/it] 100%|██████████| 520/520 [33:13<00:00, 4.15s/it] {'loss': 1.8793, 'grad_norm': 0.0006232931181697368, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [33:13<00:00, 4.15s/it] {'train_runtime': 1993.0519, 'train_samples_per_second': 33.38, 'train_steps_per_second': 0.261, 'train_loss': 1.7387995146788084, 'epoch': 1.0} + 100%|██████████| 520/520 [33:13<00:00, 4.15s/it] 100%|██████████| 520/520 [33:13<00:00, 3.83s/it] +[2025-10-10 06:37:08,964] [INFO] [launch.py:348:main] Process 1785302 exits successfully. +[2025-10-10 06:37:08,965] [INFO] [launch.py:348:main] Process 1785298 exits successfully. +[2025-10-10 06:37:08,965] [INFO] [launch.py:348:main] Process 1785297 exits successfully. +[2025-10-10 06:37:08,966] [INFO] [launch.py:348:main] Process 1785299 exits successfully. +[2025-10-10 06:37:08,966] [INFO] [launch.py:348:main] Process 1785303 exits successfully. +[2025-10-10 06:37:09,968] [INFO] [launch.py:348:main] Process 1785301 exits successfully. +[2025-10-10 06:37:09,968] [INFO] [launch.py:348:main] Process 1785300 exits successfully. +[2025-10-10 06:37:12,972] [INFO] [launch.py:348:main] Process 1785296 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251010_060227.log +Timestamp: 2025-10-10 06:37:15 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251010_091529.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251010_091529.log new file mode 100644 index 0000000000000000000000000000000000000000..e4cd8322c9da27f0fe1d025514f8867eccead766 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251010_091529.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251010_091529.log +Timestamp: 2025-10-10 09:15:29 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
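Two consistency checks on the run that just completed above, using only its own launch arguments and final stats. Throughput: 4 per-device batch × 8 ranks × 4 gradient-accumulation steps = 128 samples per optimizer step, so 520 steps cover ≈ 66.5k samples (≈ 0.1 of the 665k mix, as --train_data_ratio 0.1 requests), giving 66.5k / 1993.05 s ≈ 33.4 samples/s and 520 / 1993.05 ≈ 0.261 steps/s, matching the logged train_samples_per_second and train_steps_per_second. Schedule: the logged learning_rate trace follows a cosine decay with warmup; a minimal sketch, assuming transformers.get_cosine_schedule_with_warmup semantics, ceil(0.03 × 520) = 16 warmup steps, and the peak LR of 1.0 implied by the logged values (and by the `1` tag in the completion banner above):

```python
import math

def cosine_lr(step, base_lr=1.0, total_steps=520, warmup=16):
    # Mirrors transformers.get_cosine_schedule_with_warmup with num_cycles=0.5.
    if step < warmup:
        return base_lr * step / max(1, warmup)
    progress = (step - warmup) / max(1, total_steps - warmup)
    return base_lr * max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))

print(cosine_lr(310))  # 0.3705904774487396... -- matches the log at step 310/520
print(cosine_lr(352))  # 0.25                  -- matches the log at step 352/520
print(cosine_lr(519))  # ~9.7135e-06           -- matches the log at step 519/520
```

Step 352 landing on exactly 0.25 is a convenient spot check: it sits two-thirds of the way through the decay, where cos(2π/3) = −1/2.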
+ import pynvml # type: ignore[import] +[2025-10-10 09:15:32,617] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:35,310] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 09:15:35,311] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
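The opaque --world_info argument in the runner command above is nothing more than base64-encoded JSON naming the local ranks to launch; a standard-library decode recovers the same mapping that launch.py echoes as WORLD INFO DICT below:

```python
import base64
import json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.urlsafe_b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
```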
+ import pynvml # type: ignore[import] +[2025-10-10 09:15:38,018] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:15:39,073] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 09:15:39,074] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 09:15:39,074] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 09:15:39,074] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 09:15:39,074] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 09:15:39,074] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 09:15:39,074] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 09:15:39,076] [INFO] [launch.py:253:main] process 1942332 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:39,078] [INFO]
[launch.py:253:main] process 1942333 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:39,080] [INFO] [launch.py:253:main] process 1942334 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:39,082] [INFO] [launch.py:253:main] process 1942335 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:15:39,084] [INFO] [launch.py:253:main] process 1942336 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-10 09:15:39,086] [INFO] [launch.py:253:main] process 1942337 spawned with command: [same command as process 1942336, with '--local_rank=5']
+[2025-10-10 09:15:39,088] [INFO] [launch.py:253:main] process 1942338 spawned with command: [same command as process 1942336, with '--local_rank=6']
+[2025-10-10 09:15:39,090] [INFO] [launch.py:253:main] process 1942339 spawned with command: [same command as process 1942336, with '--local_rank=7']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[the same FutureWarning is emitted once per rank]
+[2025-10-10 09:15:45,647] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[the same line repeats for all 8 ranks through 09:15:46,012]
+[2025-10-10 09:15:46,069] [INFO] [comm.py:637:init_distributed] cdb=None
+[repeated on all 8 ranks]
+[2025-10-10 09:15:46,392] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+[printed once per rank]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+  warnings.warn(
+[repeated once per rank]
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
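For reference, the data-parallel geometry implied by the launch flags above (8 spawned ranks, --per_device_train_batch_size 4, --gradient_accumulation_steps 4) works out to an effective global batch size of 128. A minimal sketch of that arithmetic in plain Python; variable names are illustrative, not taken from the training code:

per_device_train_batch_size = 4  # --per_device_train_batch_size
gradient_accumulation_steps = 4  # --gradient_accumulation_steps
world_size = 8                   # ranks spawned by the DeepSpeed launcher

# Samples consumed per optimizer step across the whole job.
effective_batch_size = per_device_train_batch_size * gradient_accumulation_steps * world_size
assert effective_batch_size == 128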
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+[each of these three messages is emitted once per rank]
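The "soft" mask settings above (--init_mean_* 3.0 in the launch command, temperature 0.5 in the config) suggest a sigmoid-gated subnetwork parameterization. A hypothetical sketch of that recipe, assuming the common soft-gating formulation; this is illustrative, not the repository's actual implementation:

import torch

init_mean = 3.0    # --init_mean_text / --init_mean_connector from the launch command
temperature = 0.5  # temperature_attn / temperature_mlp / temperature_connector

# Learnable mask logits, initialized at init_mean; sigmoid(3.0 / 0.5) ~= 0.998,
# so training starts close to the dense (unmasked) network.
mask_logits = torch.full((4864,), init_mean, requires_grad=True)  # e.g. one gate per MLP unit

def soft_mask(logits: torch.Tensor, temperature: float) -> torch.Tensor:
    # Lower temperature sharpens the gate toward a hard 0/1 mask.
    return torch.sigmoid(logits / temperature)

gates = soft_mask(mask_logits, temperature)  # values in (0, 1), multiplied into activations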
+ywang29-vrdb-test1-worker-0:1942332:1942332 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1942332:1942332 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1942332:1942332 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1942332:1942332 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1942332:1942332 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1942332:1942332 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+[ranks 1-7 (processes 1942333-1942339) log the same bootstrap sequence]
+ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO Using network Socket
+[no InfiniBand device is found on any rank, so all 8 ranks fall back to the Socket transport]
+ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO ncclCommInitRank comm 0x55fd074fd6f0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x76aa5119efdbc36c - Init START
+[ranks 1-7 join the same communicator (commId 0x76aa5119efdbc36c) - Init START]
+[CPU affinity is set per GPU (ff,ffff0000,00ffffff for GPUs 0-3, ffffff00,0000ffff,ff000000 for GPUs 4-7); NVLS multicast support is not available on any device]
+ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO comm 0x55fd074fd6f0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+[NCCL builds 24 channels on the single node, each with ring order 0 1 2 3 4 5 6 7 and tree chain 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7; P2P Chunksize set to 524288 on every rank]
+[channels 00/0 through 23/0 are then connected rank-to-rank (k[k] -> k+1[k+1], 7[7] -> 0[0]) via P2P/CUMEM/read]
+ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 09/0 : 2[2] -> 
1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL 
INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL 
INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO Connected all trees 
+ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1942337:1943954 [5] NCCL INFO ncclCommInitRank comm 0x55cce27f3220 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x76aa5119efdbc36c - Init COMPLETE +ywang29-vrdb-test1-worker-0:1942338:1943957 [6] NCCL INFO ncclCommInitRank comm 0x558668dd2510 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x76aa5119efdbc36c - Init COMPLETE +ywang29-vrdb-test1-worker-0:1942336:1943960 [4] NCCL INFO ncclCommInitRank comm 0x55e6e3343940 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x76aa5119efdbc36c - Init COMPLETE +ywang29-vrdb-test1-worker-0:1942339:1943956 [7] NCCL INFO ncclCommInitRank comm 0x5639c3b75400 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x76aa5119efdbc36c - Init COMPLETE +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1942335:1943955 [3] NCCL INFO ncclCommInitRank comm 0x562886558380 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x76aa5119efdbc36c - Init COMPLETE +ywang29-vrdb-test1-worker-0:1942334:1943959 [2] NCCL INFO ncclCommInitRank comm 0x55d3cb849e90 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x76aa5119efdbc36c - Init COMPLETE +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1942333:1943958 [1] NCCL INFO ncclCommInitRank comm 0x562f482d7280 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x76aa5119efdbc36c - Init COMPLETE +ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1942332:1943953 [0] NCCL INFO ncclCommInitRank comm 0x55fd074fd6f0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x76aa5119efdbc36c - Init COMPLETE +[2025-10-10 09:16:30,959] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[... the same "Some weights of Qwen2ForCausalLM were not initialized ..." warning and TRAIN notice repeated verbatim by the other ranks; the log is cut off mid-list in the final repetition ...]
'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 
'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.laSome weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 
'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 
'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 
'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 
'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model + /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model + /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +yers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 
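The `*.scores` tensors flagged in this warning are the one set of weights that cannot come from the pretrained checkpoint: they are the trainable mask scores that the Supermask layers attach to each frozen projection, so `from_pretrained` necessarily reports them as newly initialized. A minimal sketch of the idea, assuming a sigmoid soft mask with the logged temperature of 0.3 (names and details are illustrative; the actual `SupermaskLinearSparsity_SoftForward_Normal` implementation is not shown in this log):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Illustrative stand-in for SupermaskLinearSparsity_SoftForward_Normal."""

    def __init__(self, in_features, out_features, bias=True, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # The pretrained weight/bias stay frozen; only the scores train.
        self.weight.requires_grad_(False)
        if self.bias is not None:
            self.bias.requires_grad_(False)
        self.temperature = temperature
        # One trainable score per weight element. This tensor does not exist in
        # the pretrain checkpoint, hence the "newly initialized" warning above.
        # Initializing at 3.0 matches the Mean=3.000000 reported later in this
        # log; sigmoid(3.0 / 0.3) ~= 1.0, so the initial soft mask is ~all-ones.
        self.scores = nn.Parameter(torch.full_like(self.weight, 3.0))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft forward
        return F.linear(x, self.weight * mask, self.bias)
```

With 896-dim hidden states, a `q_proj` scored this way carries 896 x 896 = 802,816 score elements, matching the per-tensor counts reported at the end of this log.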
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
[... this line is printed once per rank; in the raw log several ranks' copies are interleaved with one another ...]
+[2025-10-10 09:20:35,312] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
[... the vision-tower and connector loading lines likewise repeat once per rank ...]
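Each component is restored from its own subdirectory of the pretrain checkpoint. Roughly, and only as a sketch of the layout the paths above imply (TinyLLaVA's own loader is not reproduced here):

```python
import os
import torch
from transformers import AutoModelForCausalLM, SiglipVisionModel

CKPT = "/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain"

# language_model/ holds the Qwen2 weights; the mask scores are absent there,
# which is what triggers the "newly initialized" warnings above.
llm = AutoModelForCausalLM.from_pretrained(os.path.join(CKPT, "language_model"))

# vision_tower/ holds the SigLIP weights.
vit = SiglipVisionModel.from_pretrained(os.path.join(CKPT, "vision_tower"))

# connector/pytorch_model.bin is a plain state_dict for the MLP connector.
connector_state = torch.load(os.path.join(CKPT, "connector", "pytorch_model.bin"),
                             map_location="cpu")
```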
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
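The connector at the bottom of this dump is a two-layer GELU MLP (1152 -> 896 -> 896) mapping SigLIP features into the Qwen2 embedding space, with both projections wrapped in the same masked-linear class as the LLM. Ignoring the masking, the shape-equivalent module is simply (a sketch with plain `nn.Linear` standing in for the Supermask layers):

```python
import torch.nn as nn

# Shape-equivalent to the MLPConnector printed above.
connector = nn.Sequential(
    nn.Linear(1152, 896, bias=True),   # SigLIP hidden size -> LLM hidden size
    nn.GELU(approximate="none"),
    nn.Linear(896, 896, bias=True),
)
```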
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
[... this sampling line is printed once by each of the 8 ranks ...]
+2025-10-10 09:20:54,000 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
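Two things are worth checking against these numbers. First, with temperature 0.3 a score mean of 3.0 gives an initial soft mask of sigmoid(3.0 / 0.3) ≈ 1.0, so mask-tuning starts from an effectively dense network. Second, the trainable count is exactly the number of mask-score elements; a quick sanity check:

```python
# Every trainable parameter in this run is a mask score.
hidden, kv_dim, inter = 896, 128, 4864        # Qwen2.5-0.5B projection shapes
per_layer = (2 * hidden * hidden              # q_proj and o_proj scores
             + 2 * hidden * kv_dim            # k_proj and v_proj scores
             + 3 * hidden * inter)            # gate/up/down projection scores
llm_scores = 24 * per_layer                   # 24 decoder layers
connector_scores = 1152 * 896 + 896 * 896     # the two connector projections
print(llm_scores + connector_scores)          # -> 359661568, as logged above
```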
+2025-10-10 09:20:54,006 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+[NCCL init chatter abridged: the 8 local ranks (GPUs 0-7, tree-chained 0->1->...->7) each build 24 ring and tree channels, set the P2P chunk size to 524288, and connect every channel in both directions via P2P/CUMEM/read until every rank reports Connected all rings; the verbatim per-channel INFO lines are omitted here.]
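Note: the score tensors enumerated above have exactly one entry per weight of the projection they mask, so the counts can be checked against the matrix shapes. A minimal sanity check in Python, assuming the published Qwen2.5-0.5B dimensions (hidden size 896, 2 KV heads of head dim 64, intermediate size 4864) and the SigLIP-so400m hidden size of 1152 feeding the two-linear-layer connector:

# Dimensions below are assumptions from the published model configs,
# not values printed in this log.
hidden, kv_dim, inter, vit = 896, 2 * 64, 4864, 1152
assert hidden * hidden == 802816   # q_proj / o_proj scores per layer
assert kv_dim * hidden == 114688   # k_proj / v_proj scores per layer
assert inter * hidden == 4358144   # gate/up/down_proj scores per layer
assert vit * hidden == 1032192     # connector._connector.0 scores
assert hidden * hidden == 802816   # connector._connector.2 scores
print("all score counts match the masked weight shapes")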
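Note: the NCCL topology and channel-setup lines abridged above, and the init summary that follows, are INFO-level output from NCCL's own logger. Verbosity is controlled by environment variables that NCCL reads when the communicator is created, so they must be set before the process group initializes; a sketch (the subsystem filter line is an optional assumption, not something this run used):

import os
os.environ["NCCL_DEBUG"] = "WARN"           # one of VERSION, WARN, INFO, TRACE
# os.environ["NCCL_DEBUG_SUBSYS"] = "INIT"  # optionally restrict INFO output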
+ywang29-vrdb-test1-worker-0:1942332:1949083 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1942332:1949083 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1942332:1949083 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[identical Connected all trees / threadThresholds / channel-count lines for ranks 1-7 omitted]
+ywang29-vrdb-test1-worker-0:1942336:1949087 [4] NCCL INFO ncclCommInitRank comm 0x7fee6406afe0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x3b9d0836826c01da - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1942338:1949085 [6] NCCL INFO ncclCommInitRank comm 0x7f1c5c06b780 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x3b9d0836826c01da - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1942334:1949086 [2] NCCL INFO ncclCommInitRank comm 0x7f0cf006af20 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x3b9d0836826c01da - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1942335:1949088 [3] NCCL INFO ncclCommInitRank comm 0x7f538006b040 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x3b9d0836826c01da - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1942339:1949084 [7] NCCL INFO ncclCommInitRank comm 0x7f57e006a750 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x3b9d0836826c01da - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1942332:1949083 [0] NCCL INFO ncclCommInitRank comm 0x7fe1f806b830 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x3b9d0836826c01da - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1942337:1949089 [5] NCCL INFO ncclCommInitRank comm 0x7fe7f006b150 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x3b9d0836826c01da - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1942333:1949090 [1] NCCL INFO ncclCommInitRank comm 0x7f538006a980 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x3b9d0836826c01da - Init COMPLETE
+ 0%| | 1/520 [00:13<1:58:41, 13.72s/it] {'loss': 2.0453, 'grad_norm': 0.004833560078610562, 'learning_rate': 0.00625, 'epoch': 0.0}
+ 0%| | 2/520 [00:17<1:07:38, 7.83s/it] {'loss': 2.0549, 'grad_norm': 0.005248854102951281, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 1%| | 3/520 [00:21<51:16, 5.95s/it] {'loss': 2.1899, 'grad_norm': 0.006004459790096949, 'learning_rate': 0.018750000000000003, 'epoch': 0.01}
+ 1%| | 4/520 [00:24<43:29, 5.06s/it] {'loss': 2.0656, 'grad_norm': 0.004963478525289788, 'learning_rate': 0.025, 'epoch': 0.01}
+ 1%| | 5/520 [00:28<39:22, 4.59s/it] {'loss': 2.2333, 'grad_norm': 0.005483190681926941, 'learning_rate': 0.03125, 'epoch': 0.01}
+ 1%| | 6/520 [00:32<36:36, 4.27s/it] {'loss': 1.6754, 'grad_norm': 0.002803589019348336, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:35<34:52, 4.08s/it] {'loss': 1.7084, 'grad_norm': 0.002409880492179529, 'learning_rate': 0.043750000000000004, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:40<35:36, 4.17s/it] {'loss': 1.6043, 'grad_norm': 0.0013323421921180916, 'learning_rate': 0.05, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:44<35:43, 4.19s/it] {'loss': 1.6502, 'grad_norm': 0.0007829310500516442, 'learning_rate': 0.05625, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:48<34:17, 4.03s/it] {'loss': 1.5058, 'grad_norm': 0.000769688982638587, 'learning_rate': 0.0625, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:52<33:46, 3.98s/it] {'loss': 1.5199, 'grad_norm': 0.0005698283780126438, 'learning_rate': 0.06875, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:55<32:59, 3.90s/it] {'loss': 1.3894, 'grad_norm': 0.00043021078332613604, 'learning_rate': 0.07500000000000001, 'epoch': 0.02}
+[2025-10-10 09:21:59,196] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [01:00<34:13, 4.05s/it] {'loss': 1.4858, 'grad_norm': 0.0004653883674113947, 'learning_rate': 0.08125, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:03<33:11, 3.93s/it] {'loss': 1.5107, 'grad_norm': 0.00040542587327568847, 'learning_rate': 0.08750000000000001, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:07<32:48, 3.90s/it] {'loss': 1.4211, 'grad_norm': 0.000356041469335207, 'learning_rate': 0.09375, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:11<32:41, 3.89s/it] {'loss': 1.3835, 'grad_norm': 0.0003934306856644099, 'learning_rate': 0.1, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:15<32:34, 3.89s/it] {'loss': 1.5212, 'grad_norm': 0.00042732761887919775, 'learning_rate': 0.0999990286465769, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:19<32:31, 3.89s/it] {'loss': 1.3864, 'grad_norm': 0.0005058364623535058, 'learning_rate': 0.09999611462404874, 'epoch': 0.03}
+ 4%|▎ | 19/520 [01:23<32:26, 3.89s/it] {'loss': 1.3716, 'grad_norm': 0.0004203672638976521, 'learning_rate': 0.09999125804563733, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:27<32:24, 3.89s/it] {'loss': 1.3565, 'grad_norm': 0.0005157655167810136, 'learning_rate': 0.09998445910004082, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:31<32:24, 3.90s/it] {'loss': 1.3646, 'grad_norm': 0.0005145636834392147, 'learning_rate': 0.09997571805142638, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:34<32:01, 3.86s/it] {'loss': 1.4865, 'grad_norm': 0.0005279789318513868, 'learning_rate': 0.09996503523941994, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:38<31:32, 3.81s/it] {'loss': 1.4252, 'grad_norm': 0.0005156350246983374, 'learning_rate': 0.0999524110790929, 'epoch': 0.04}
+ 5%|▍ | 24/520 [01:42<31:18, 3.79s/it] {'loss': 1.3384, 'grad_norm': 0.0005242490218125335, 'learning_rate': 0.09993784606094612, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:45<30:58, 3.75s/it] {'loss': 1.4351, 'grad_norm': 0.0006279476264586451, 'learning_rate': 0.09992134075089083, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:49<30:42, 3.73s/it] {'loss': 1.3632, 'grad_norm': 0.0005718961964065195, 'learning_rate': 0.0999028957902266, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:53<30:29, 3.71s/it] {'loss': 1.2914, 'grad_norm': 0.0005578856571620618, 'learning_rate': 0.09988251189561645, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:56<30:18, 3.70s/it] {'loss': 1.3245, 'grad_norm': 0.0006033215086329052, 'learning_rate': 0.099860189859059, 'epoch': 0.05}
+ 6%|▌ | 29/520 [02:00<30:10, 3.69s/it] {'loss': 1.3381, 'grad_norm': 0.0006171552930833129, 'learning_rate': 0.09983593054785776, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:04<30:06, 3.69s/it] {'loss': 1.3947, 'grad_norm': 0.0005559520883600306, 'learning_rate': 0.09980973490458728, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:07<30:09, 3.70s/it] {'loss': 1.2978, 'grad_norm': 0.0005327412896386399, 'learning_rate': 0.0997816039470567, 'epoch': 0.06}
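Note: the logged learning rates climb linearly to 0.1 by step 16 and then decay, which is consistent with linear warmup over ceil(0.03 * 520) = 16 steps followed by half-cosine decay, as implemented by transformers' get_cosine_schedule_with_warmup. A minimal sketch that reproduces the logged values; the peak of 0.1 is inferred from the log itself, not taken from the launch config:

import math

peak_lr, total_steps = 0.1, 520               # peak inferred from the log
warmup_steps = math.ceil(0.03 * total_steps)  # warmup_ratio 0.03 -> 16 steps

def lr_at(step):
    # Linear warmup, then half-cosine decay to zero.
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

for s in (1, 8, 16, 17, 20, 50):
    print(s, lr_at(s))  # 0.00625, 0.05, 0.1, 0.0999990..., 0.0999844..., 0.0988813...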
'learning_rate': 0.0997816039470567, 'epoch': 0.06} + 6%|▌ | 31/520 [02:07<30:09, 3.70s/it] 6%|▌ | 32/520 [02:11<30:04, 3.70s/it] {'loss': 1.2128, 'grad_norm': 0.0006182145984375151, 'learning_rate': 0.09975153876827009, 'epoch': 0.06} + 6%|▌ | 32/520 [02:11<30:04, 3.70s/it] 6%|▋ | 33/520 [02:15<30:06, 3.71s/it] {'loss': 1.2905, 'grad_norm': 0.000665621969771221, 'learning_rate': 0.09971954053638399, 'epoch': 0.06} + 6%|▋ | 33/520 [02:15<30:06, 3.71s/it] 7%|▋ | 34/520 [02:19<30:00, 3.70s/it] {'loss': 1.2865, 'grad_norm': 0.0007167636503906873, 'learning_rate': 0.09968561049466214, 'epoch': 0.07} + 7%|▋ | 34/520 [02:19<30:00, 3.70s/it] 7%|▋ | 35/520 [02:22<29:57, 3.71s/it] {'loss': 1.2987, 'grad_norm': 0.0008147311495138253, 'learning_rate': 0.09964974996142698, 'epoch': 0.07} + 7%|▋ | 35/520 [02:22<29:57, 3.71s/it] 7%|▋ | 36/520 [02:26<30:02, 3.72s/it] {'loss': 1.3888, 'grad_norm': 0.0006753960091589135, 'learning_rate': 0.09961196033000862, 'epoch': 0.07} + 7%|▋ | 36/520 [02:26<30:02, 3.72s/it] 7%|▋ | 37/520 [02:30<29:45, 3.70s/it] {'loss': 1.3613, 'grad_norm': 0.0006730715801552501, 'learning_rate': 0.09957224306869053, 'epoch': 0.07} + 7%|▋ | 37/520 [02:30<29:45, 3.70s/it] 7%|▋ | 38/520 [02:33<29:38, 3.69s/it] {'loss': 1.4506, 'grad_norm': 0.0007207614138475074, 'learning_rate': 0.09953059972065265, 'epoch': 0.07} + 7%|▋ | 38/520 [02:33<29:38, 3.69s/it] 8%|▊ | 39/520 [02:37<29:31, 3.68s/it] {'loss': 1.3244, 'grad_norm': 0.0009051830600860277, 'learning_rate': 0.09948703190391131, 'epoch': 0.07} + 8%|▊ | 39/520 [02:37<29:31, 3.68s/it] 8%|▊ | 40/520 [02:41<29:34, 3.70s/it] {'loss': 1.3481, 'grad_norm': 0.0006814867869677018, 'learning_rate': 0.09944154131125643, 'epoch': 0.08} + 8%|▊ | 40/520 [02:41<29:34, 3.70s/it] 8%|▊ | 41/520 [02:45<29:39, 3.72s/it] {'loss': 1.3252, 'grad_norm': 0.0007870085057122315, 'learning_rate': 0.09939412971018574, 'epoch': 0.08} + 8%|▊ | 41/520 [02:45<29:39, 3.72s/it] 8%|▊ | 42/520 [02:48<29:32, 3.71s/it] {'loss': 1.3189, 'grad_norm': 0.0010336799624876343, 'learning_rate': 0.09934479894283606, 'epoch': 0.08} + 8%|▊ | 42/520 [02:48<29:32, 3.71s/it] 8%|▊ | 43/520 [02:52<29:30, 3.71s/it] {'loss': 1.2506, 'grad_norm': 0.0007853313238938874, 'learning_rate': 0.0992935509259118, 'epoch': 0.08} + 8%|▊ | 43/520 [02:52<29:30, 3.71s/it] 8%|▊ | 44/520 [02:56<29:26, 3.71s/it] {'loss': 1.3438, 'grad_norm': 0.0009053924916717351, 'learning_rate': 0.09924038765061041, 'epoch': 0.08} + 8%|▊ | 44/520 [02:56<29:26, 3.71s/it] 9%|▊ | 45/520 [02:59<29:17, 3.70s/it] {'loss': 1.3436, 'grad_norm': 0.0009173882548927186, 'learning_rate': 0.09918531118254507, 'epoch': 0.09} + 9%|▊ | 45/520 [02:59<29:17, 3.70s/it] 9%|▉ | 46/520 [03:03<29:11, 3.69s/it] {'loss': 1.3893, 'grad_norm': 0.0009293260682391057, 'learning_rate': 0.09912832366166442, 'epoch': 0.09} + 9%|▉ | 46/520 [03:03<29:11, 3.69s/it] 9%|▉ | 47/520 [03:07<29:12, 3.71s/it] {'loss': 1.3019, 'grad_norm': 0.0009346898151833344, 'learning_rate': 0.09906942730216939, 'epoch': 0.09} + 9%|▉ | 47/520 [03:07<29:12, 3.71s/it] 9%|▉ | 48/520 [03:10<29:04, 3.70s/it] {'loss': 1.3138, 'grad_norm': 0.001121512784836377, 'learning_rate': 0.09900862439242719, 'epoch': 0.09} + 9%|▉ | 48/520 [03:10<29:04, 3.70s/it] 9%|▉ | 49/520 [03:14<29:04, 3.70s/it] {'loss': 1.3364, 'grad_norm': 0.0009579356975347285, 'learning_rate': 0.09894591729488243, 'epoch': 0.09} + 9%|▉ | 49/520 [03:14<29:04, 3.70s/it] 10%|▉ | 50/520 [03:18<29:02, 3.71s/it] {'loss': 1.3315, 'grad_norm': 0.0009545203695234715, 'learning_rate': 0.09888130844596524, 'epoch': 0.1} + 10%|▉ | 
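The stage3.py warning above is DeepSpeed's stock message for high allocator pressure, and it names its own remedy: call `get_accelerator().empty_cache()` at the same point on every rank. A minimal sketch of that suggestion, assuming a hand-written DeepSpeed loop; this run actually steps through the HF Trainer, so the loop shape and the `FLUSH_EVERY` interval here are illustrative assumptions, not the project's code:

```python
# Sketch of the remedy named in the stage3.py warning: flush the CUDA
# allocator cache on every rank at the same step, instead of letting each
# rank flush on its own under memory pressure.
from deepspeed.accelerator import get_accelerator

FLUSH_EVERY = 50  # hypothetical interval; tighten it if the warning keeps firing

def train_epoch(engine, dataloader):
    for step, batch in enumerate(dataloader):
        outputs = engine(**batch)   # forward through the DeepSpeed engine
        loss = outputs.loss         # assuming an HF-style model that returns .loss
        engine.backward(loss)
        engine.step()               # where stage3.py logs the cache-flush warning
        if step % FLUSH_EVERY == 0:
            # Synchronized flush: every rank executes this at the same step,
            # which is the point of the warning's "at the same time" clause.
            get_accelerator().empty_cache()
```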
50/520 [03:18<29:02, 3.71s/it] 10%|▉ | 51/520 [03:22<29:08, 3.73s/it] {'loss': 1.27, 'grad_norm': 0.001070942935442853, 'learning_rate': 0.09881480035599667, 'epoch': 0.1} + 10%|▉ | 51/520 [03:22<29:08, 3.73s/it] 10%|█ | 52/520 [03:26<29:33, 3.79s/it] {'loss': 1.4011, 'grad_norm': 0.0011020216242366204, 'learning_rate': 0.09874639560909118, 'epoch': 0.1} + 10%|█ | 52/520 [03:26<29:33, 3.79s/it] 10%|█ | 53/520 [03:29<29:47, 3.83s/it] {'loss': 1.3729, 'grad_norm': 0.0010203010599659215, 'learning_rate': 0.09867609686305617, 'epoch': 0.1} + 10%|█ | 53/520 [03:29<29:47, 3.83s/it] 10%|█ | 54/520 [03:33<29:51, 3.84s/it] {'loss': 1.3131, 'grad_norm': 0.0010178969923969393, 'learning_rate': 0.09860390684928873, 'epoch': 0.1} + 10%|█ | 54/520 [03:33<29:51, 3.84s/it] 11%|█ | 55/520 [03:37<29:53, 3.86s/it] {'loss': 1.2604, 'grad_norm': 0.0011039712265637465, 'learning_rate': 0.09852982837266955, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<29:53, 3.86s/it] 11%|█ | 56/520 [03:41<29:50, 3.86s/it] {'loss': 1.3867, 'grad_norm': 0.0010350097338134325, 'learning_rate': 0.0984538643114539, 'epoch': 0.11} + 11%|█ | 56/520 [03:41<29:50, 3.86s/it] 11%|█ | 57/520 [03:45<29:50, 3.87s/it] {'loss': 1.2596, 'grad_norm': 0.00120778032822996, 'learning_rate': 0.09837601761715982, 'epoch': 0.11} + 11%|█ | 57/520 [03:45<29:50, 3.87s/it] 11%|█ | 58/520 [03:49<29:50, 3.87s/it] {'loss': 1.4006, 'grad_norm': 0.0009293530459569795, 'learning_rate': 0.09829629131445342, 'epoch': 0.11} + 11%|█ | 58/520 [03:49<29:50, 3.87s/it] 11%|█▏ | 59/520 [03:53<29:43, 3.87s/it] {'loss': 1.2149, 'grad_norm': 0.0010479896640851545, 'learning_rate': 0.09821468850103139, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:53<29:43, 3.87s/it] 12%|█▏ | 60/520 [03:57<29:34, 3.86s/it] {'loss': 1.3131, 'grad_norm': 0.0010081667568189667, 'learning_rate': 0.09813121234750061, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:57<29:34, 3.86s/it] 12%|█▏ | 61/520 [04:00<29:16, 3.83s/it] {'loss': 1.2891, 'grad_norm': 0.001100086118137324, 'learning_rate': 0.09804586609725499, 'epoch': 0.12} + 12%|█▏ | 61/520 [04:00<29:16, 3.83s/it] 12%|█▏ | 62/520 [04:04<28:56, 3.79s/it] {'loss': 1.2979, 'grad_norm': 0.0012519595339337273, 'learning_rate': 0.09795865306634939, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:04<28:56, 3.79s/it] 12%|█▏ | 63/520 [04:08<28:53, 3.79s/it] {'loss': 1.2969, 'grad_norm': 0.0011394515505191132, 'learning_rate': 0.09786957664337091, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:08<28:53, 3.79s/it] 12%|█▏ | 64/520 [04:12<28:39, 3.77s/it] {'loss': 1.3154, 'grad_norm': 0.0011507544289383232, 'learning_rate': 0.09777864028930705, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:12<28:39, 3.77s/it] 12%|█▎ | 65/520 [04:15<28:27, 3.75s/it] {'loss': 1.3246, 'grad_norm': 0.0013802347746102537, 'learning_rate': 0.09768584753741134, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:15<28:27, 3.75s/it] 13%|█▎ | 66/520 [04:19<28:18, 3.74s/it] {'loss': 1.2661, 'grad_norm': 0.0010831347688591859, 'learning_rate': 0.09759120199306613, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:19<28:18, 3.74s/it] 13%|█▎ | 67/520 [04:23<28:17, 3.75s/it] {'loss': 1.1871, 'grad_norm': 0.0011618312210633665, 'learning_rate': 0.0974947073336423, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:23<28:17, 3.75s/it] 13%|█▎ | 68/520 [04:27<28:19, 3.76s/it] {'loss': 1.2545, 'grad_norm': 0.0011065825666036224, 'learning_rate': 0.0973963673083566, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:27<28:19, 3.76s/it] 13%|█▎ | 69/520 [04:30<28:13, 3.76s/it] {'loss': 1.2341, 'grad_norm': 0.00121280399722296, 'learning_rate': 0.0972961857381258, 'epoch': 0.13} + 13%|█▎ | 69/520 
[04:30<28:13, 3.76s/it] 13%|█▎ | 70/520 [04:34<28:05, 3.74s/it] {'loss': 1.2607, 'grad_norm': 0.0012769043942836846, 'learning_rate': 0.09719416651541839, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:34<28:05, 3.74s/it] 14%|█▎ | 71/520 [04:38<28:02, 3.75s/it] {'loss': 1.2044, 'grad_norm': 0.001120032142654552, 'learning_rate': 0.09709031360410318, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:38<28:02, 3.75s/it] 14%|█▍ | 72/520 [04:41<27:54, 3.74s/it] {'loss': 1.3527, 'grad_norm': 0.0011689081653706336, 'learning_rate': 0.09698463103929543, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:41<27:54, 3.74s/it] 14%|█▍ | 73/520 [04:45<27:49, 3.73s/it] {'loss': 1.1813, 'grad_norm': 0.0012539453816808204, 'learning_rate': 0.09687712292719997, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:45<27:49, 3.73s/it] 14%|█▍ | 74/520 [04:49<27:48, 3.74s/it] {'loss': 1.2821, 'grad_norm': 0.001310268948684568, 'learning_rate': 0.0967677934449517, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:49<27:48, 3.74s/it] 14%|█▍ | 75/520 [04:53<27:44, 3.74s/it] {'loss': 1.2062, 'grad_norm': 0.00111367913326155, 'learning_rate': 0.09665664684045333, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:53<27:44, 3.74s/it] 15%|█▍ | 76/520 [04:56<27:41, 3.74s/it] {'loss': 1.3258, 'grad_norm': 0.0010114338410354077, 'learning_rate': 0.09654368743221022, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:56<27:41, 3.74s/it] 15%|█▍ | 77/520 [05:00<27:36, 3.74s/it] {'loss': 1.1364, 'grad_norm': 0.0014040427390045007, 'learning_rate': 0.09642891960916268, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:00<27:36, 3.74s/it] 15%|█▌ | 78/520 [05:04<27:33, 3.74s/it] {'loss': 1.241, 'grad_norm': 0.0012355275581674456, 'learning_rate': 0.09631234783051544, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:04<27:33, 3.74s/it] 15%|█▌ | 79/520 [05:08<27:42, 3.77s/it] {'loss': 1.223, 'grad_norm': 0.0011793209193031557, 'learning_rate': 0.09619397662556434, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:08<27:42, 3.77s/it] 15%|█▌ | 80/520 [05:12<27:53, 3.80s/it] {'loss': 1.3059, 'grad_norm': 0.0012369277144084509, 'learning_rate': 0.09607381059352038, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:12<27:53, 3.80s/it] 16%|█▌ | 81/520 [05:15<27:59, 3.82s/it] {'loss': 1.3629, 'grad_norm': 0.0015605797117628074, 'learning_rate': 0.09595185440333104, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:15<27:59, 3.82s/it] 16%|█▌ | 82/520 [05:19<28:02, 3.84s/it] {'loss': 1.2909, 'grad_norm': 0.0012519236004247729, 'learning_rate': 0.09582811279349882, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:19<28:02, 3.84s/it] 16%|█▌ | 83/520 [05:23<27:47, 3.82s/it] {'loss': 1.302, 'grad_norm': 0.0013652185433191976, 'learning_rate': 0.09570259057189717, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:23<27:47, 3.82s/it] 16%|█▌ | 84/520 [05:27<27:30, 3.78s/it] {'loss': 1.3143, 'grad_norm': 0.0013589862730152436, 'learning_rate': 0.09557529261558367, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:27<27:30, 3.78s/it] 16%|█▋ | 85/520 [05:31<27:18, 3.77s/it] {'loss': 1.3535, 'grad_norm': 0.0012683532489717977, 'learning_rate': 0.09544622387061055, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:31<27:18, 3.77s/it] 17%|█▋ | 86/520 [05:34<27:05, 3.75s/it] {'loss': 1.3424, 'grad_norm': 0.0012582520127056556, 'learning_rate': 0.09531538935183251, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:34<27:05, 3.75s/it] 17%|█▋ | 87/520 [05:38<26:58, 3.74s/it] {'loss': 1.2594, 'grad_norm': 0.001235506921688923, 'learning_rate': 0.09518279414271184, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:38<26:58, 3.74s/it] 17%|█▋ | 88/520 [05:42<26:55, 3.74s/it] {'loss': 1.2008, 'grad_norm': 0.0009828858017855455, 'learning_rate': 0.09504844339512096, 'epoch': 0.17} + 17%|█▋ 
| 88/520 [05:42<26:55, 3.74s/it] 17%|█▋ | 89/520 [05:45<26:47, 3.73s/it] {'loss': 1.3022, 'grad_norm': 0.0013523486894444264, 'learning_rate': 0.09491234232914221, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:45<26:47, 3.73s/it] 17%|█▋ | 90/520 [05:49<26:39, 3.72s/it] {'loss': 1.2359, 'grad_norm': 0.0012595951992193399, 'learning_rate': 0.09477449623286505, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:49<26:39, 3.72s/it] 18%|█▊ | 91/520 [05:53<26:33, 3.71s/it] {'loss': 1.303, 'grad_norm': 0.0011875249803346825, 'learning_rate': 0.09463491046218059, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:53<26:33, 3.71s/it] 18%|█▊ | 92/520 [05:57<26:50, 3.76s/it] {'loss': 1.2449, 'grad_norm': 0.0013301160982954888, 'learning_rate': 0.09449359044057344, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:57<26:50, 3.76s/it] 18%|█▊ | 93/520 [06:01<27:04, 3.80s/it] {'loss': 1.254, 'grad_norm': 0.0013501256393481452, 'learning_rate': 0.09435054165891109, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:01<27:04, 3.80s/it] 18%|█▊ | 94/520 [06:05<27:13, 3.83s/it] {'loss': 1.3328, 'grad_norm': 0.0012864138044033015, 'learning_rate': 0.09420576967523049, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:05<27:13, 3.83s/it] 18%|█▊ | 95/520 [06:08<27:21, 3.86s/it] {'loss': 1.2356, 'grad_norm': 0.0014726966520807832, 'learning_rate': 0.09405928011452212, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:08<27:21, 3.86s/it] 18%|█▊ | 96/520 [06:12<27:21, 3.87s/it] {'loss': 1.2587, 'grad_norm': 0.0011615733837603119, 'learning_rate': 0.09391107866851144, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:12<27:21, 3.87s/it] 19%|█▊ | 97/520 [06:16<27:25, 3.89s/it] {'loss': 1.2306, 'grad_norm': 0.0014793627421458785, 'learning_rate': 0.09376117109543769, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:16<27:25, 3.89s/it] 19%|█▉ | 98/520 [06:20<27:21, 3.89s/it] {'loss': 1.232, 'grad_norm': 0.0010944702409898502, 'learning_rate': 0.09360956321983027, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:20<27:21, 3.89s/it] 19%|█▉ | 99/520 [06:24<27:22, 3.90s/it] {'loss': 1.2254, 'grad_norm': 0.001338136838091138, 'learning_rate': 0.09345626093228232, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:24<27:22, 3.90s/it] 19%|█▉ | 100/520 [06:28<27:19, 3.90s/it] {'loss': 1.2103, 'grad_norm': 0.0011786368952649187, 'learning_rate': 0.09330127018922195, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:28<27:19, 3.90s/it] 19%|█▉ | 101/520 [06:32<26:59, 3.86s/it] {'loss': 1.2514, 'grad_norm': 0.001329103251175922, 'learning_rate': 0.09314459701268066, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:32<26:59, 3.86s/it] 20%|█▉ | 102/520 [06:35<26:36, 3.82s/it] {'loss': 1.2464, 'grad_norm': 0.0013487739172479874, 'learning_rate': 0.09298624749005951, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:35<26:36, 3.82s/it] 20%|█▉ | 103/520 [06:39<26:17, 3.78s/it] {'loss': 1.189, 'grad_norm': 0.0011829917051304473, 'learning_rate': 0.09282622777389259, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:39<26:17, 3.78s/it] 20%|██ | 104/520 [06:43<26:03, 3.76s/it] {'loss': 1.258, 'grad_norm': 0.001333428513625804, 'learning_rate': 0.09266454408160779, 'epoch': 0.2} + 20%|██ | 104/520 [06:43<26:03, 3.76s/it] 20%|██ | 105/520 [06:47<26:00, 3.76s/it] {'loss': 1.2486, 'grad_norm': 0.0012441829617671023, 'learning_rate': 0.09250120269528546, 'epoch': 0.2} + 20%|██ | 105/520 [06:47<26:00, 3.76s/it] 20%|██ | 106/520 [06:50<25:52, 3.75s/it] {'loss': 1.2342, 'grad_norm': 0.0011378757518969269, 'learning_rate': 0.09233620996141421, 'epoch': 0.2} + 20%|██ | 106/520 [06:50<25:52, 3.75s/it] 21%|██ | 107/520 [06:54<25:42, 3.73s/it] {'loss': 1.2094, 'grad_norm': 0.0012261686497850924, 'learning_rate': 0.09216957229064429, 
'epoch': 0.21} + 21%|██ | 107/520 [06:54<25:42, 3.73s/it] 21%|██ | 108/520 [06:58<25:46, 3.75s/it] {'loss': 1.2051, 'grad_norm': 0.001305253924144071, 'learning_rate': 0.09200129615753859, 'epoch': 0.21} + 21%|██ | 108/520 [06:58<25:46, 3.75s/it] 21%|██ | 109/520 [07:02<25:36, 3.74s/it] {'loss': 1.1841, 'grad_norm': 0.0011219788743198529, 'learning_rate': 0.09183138810032099, 'epoch': 0.21} + 21%|██ | 109/520 [07:02<25:36, 3.74s/it] 21%|██ | 110/520 [07:05<25:33, 3.74s/it] {'loss': 1.3779, 'grad_norm': 0.0013344744732675994, 'learning_rate': 0.09165985472062245, 'epoch': 0.21} + 21%|██ | 110/520 [07:05<25:33, 3.74s/it] 21%|██▏ | 111/520 [07:09<25:25, 3.73s/it] {'loss': 1.3726, 'grad_norm': 0.0013732074702398082, 'learning_rate': 0.09148670268322438, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:09<25:25, 3.73s/it] 22%|██▏ | 112/520 [07:13<25:13, 3.71s/it] {'loss': 1.2678, 'grad_norm': 0.0012735144190041778, 'learning_rate': 0.09131193871579975, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:13<25:13, 3.71s/it] 22%|██▏ | 113/520 [07:16<25:12, 3.72s/it] {'loss': 1.1628, 'grad_norm': 0.00121412271256064, 'learning_rate': 0.09113556960865167, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:16<25:12, 3.72s/it] 22%|██▏ | 114/520 [07:20<25:23, 3.75s/it] {'loss': 1.2585, 'grad_norm': 0.0012656839800329481, 'learning_rate': 0.0909576022144496, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:20<25:23, 3.75s/it] 22%|██▏ | 115/520 [07:24<25:33, 3.79s/it] {'loss': 1.3445, 'grad_norm': 0.0012905544986277597, 'learning_rate': 0.09077804344796302, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:24<25:33, 3.79s/it] 22%|██▏ | 116/520 [07:28<25:40, 3.81s/it] {'loss': 1.36, 'grad_norm': 0.0012290782357399742, 'learning_rate': 0.09059690028579284, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:28<25:40, 3.81s/it] 22%|██▎ | 117/520 [07:32<25:33, 3.81s/it] {'loss': 1.3192, 'grad_norm': 0.0013143119084034704, 'learning_rate': 0.09041417976610028, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:32<25:33, 3.81s/it] 23%|██▎ | 118/520 [07:35<25:17, 3.77s/it] {'loss': 1.2563, 'grad_norm': 0.0012360831869387716, 'learning_rate': 0.09022988898833342, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:35<25:17, 3.77s/it] 23%|██▎ | 119/520 [07:39<25:04, 3.75s/it] {'loss': 1.2081, 'grad_norm': 0.0013131986068225274, 'learning_rate': 0.0900440351129514, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:39<25:04, 3.75s/it] 23%|██▎ | 120/520 [07:43<24:51, 3.73s/it] {'loss': 1.2165, 'grad_norm': 0.0014347065573164892, 'learning_rate': 0.08985662536114614, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:43<24:51, 3.73s/it] 23%|██▎ | 121/520 [07:47<24:38, 3.71s/it] {'loss': 1.2634, 'grad_norm': 0.0013295709764042916, 'learning_rate': 0.08966766701456176, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:47<24:38, 3.71s/it] 23%|██▎ | 122/520 [07:50<24:30, 3.70s/it] {'loss': 1.1837, 'grad_norm': 0.0012178290046483886, 'learning_rate': 0.08947716741501177, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:50<24:30, 3.70s/it] 24%|██▎ | 123/520 [07:54<24:22, 3.68s/it] {'loss': 1.2731, 'grad_norm': 0.0012625738651438844, 'learning_rate': 0.08928513396419369, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:54<24:22, 3.68s/it] 24%|██▍ | 124/520 [07:58<24:21, 3.69s/it] {'loss': 1.2305, 'grad_norm': 0.0013891079650136444, 'learning_rate': 0.0890915741234015, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:58<24:21, 3.69s/it] 24%|██▍ | 125/520 [08:01<24:19, 3.70s/it] {'loss': 1.2341, 'grad_norm': 0.0012818684970658796, 'learning_rate': 0.08889649541323574, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:01<24:19, 3.70s/it] 24%|██▍ | 126/520 [08:06<25:41, 3.91s/it] {'loss': 
1.2055, 'grad_norm': 0.001070990081167639, 'learning_rate': 0.08869990541331138, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:06<25:41, 3.91s/it] 24%|██▍ | 127/520 [08:09<25:12, 3.85s/it] {'loss': 1.2185, 'grad_norm': 0.0014529785115951364, 'learning_rate': 0.08850181176196315, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:09<25:12, 3.85s/it] 25%|██▍ | 128/520 [08:13<24:50, 3.80s/it] {'loss': 1.2536, 'grad_norm': 0.0012814871931923007, 'learning_rate': 0.0883022221559489, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:13<24:50, 3.80s/it] 25%|██▍ | 129/520 [08:17<24:36, 3.78s/it] {'loss': 1.2269, 'grad_norm': 0.0012136805358466867, 'learning_rate': 0.08810114435015054, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:17<24:36, 3.78s/it] 25%|██▌ | 130/520 [08:21<24:47, 3.81s/it] {'loss': 1.2458, 'grad_norm': 0.0011669971716597547, 'learning_rate': 0.08789858615727265, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:21<24:47, 3.81s/it] 25%|██▌ | 131/520 [08:25<24:53, 3.84s/it] {'loss': 1.1718, 'grad_norm': 0.0011180515385959123, 'learning_rate': 0.087694555447539, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:25<24:53, 3.84s/it] 25%|██▌ | 132/520 [08:28<24:59, 3.86s/it] {'loss': 1.2977, 'grad_norm': 0.0014563190550571394, 'learning_rate': 0.08748906014838671, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:28<24:59, 3.86s/it] 26%|██▌ | 133/520 [08:32<24:57, 3.87s/it] {'loss': 1.2141, 'grad_norm': 0.0014225869728604912, 'learning_rate': 0.08728210824415827, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:32<24:57, 3.87s/it] 26%|██▌ | 134/520 [08:36<24:51, 3.86s/it] {'loss': 1.2824, 'grad_norm': 0.001290980699049787, 'learning_rate': 0.08707370777579133, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:36<24:51, 3.86s/it] 26%|██▌ | 135/520 [08:40<24:26, 3.81s/it] {'loss': 1.3393, 'grad_norm': 0.0013601502711348486, 'learning_rate': 0.0868638668405062, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:40<24:26, 3.81s/it] 26%|██▌ | 136/520 [08:44<24:13, 3.78s/it] {'loss': 1.2939, 'grad_norm': 0.0013276811708641827, 'learning_rate': 0.08665259359149131, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:44<24:13, 3.78s/it] 26%|██▋ | 137/520 [08:47<24:04, 3.77s/it] {'loss': 1.2068, 'grad_norm': 0.0014796189612569833, 'learning_rate': 0.08643989623758642, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:47<24:04, 3.77s/it] 27%|██▋ | 138/520 [08:51<23:52, 3.75s/it] {'loss': 1.2203, 'grad_norm': 0.0011873916672132923, 'learning_rate': 0.08622578304296363, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:51<23:52, 3.75s/it] 27%|██▋ | 139/520 [08:55<23:44, 3.74s/it] {'loss': 1.0859, 'grad_norm': 0.0011301842761915655, 'learning_rate': 0.08601026232680634, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:55<23:44, 3.74s/it] 27%|██▋ | 140/520 [08:59<23:37, 3.73s/it] {'loss': 1.2285, 'grad_norm': 0.0011436435327419818, 'learning_rate': 0.08579334246298592, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:59<23:37, 3.73s/it] 27%|██▋ | 141/520 [09:02<23:30, 3.72s/it] {'loss': 1.3161, 'grad_norm': 0.0011937646834177564, 'learning_rate': 0.08557503187973652, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:02<23:30, 3.72s/it] 27%|██▋ | 142/520 [09:06<23:22, 3.71s/it] {'loss': 1.2304, 'grad_norm': 0.0011318609315147253, 'learning_rate': 0.08535533905932738, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:06<23:22, 3.71s/it] 28%|██▊ | 143/520 [09:10<23:14, 3.70s/it] {'loss': 1.2434, 'grad_norm': 0.0013609923035903487, 'learning_rate': 0.08513427253773347, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:10<23:14, 3.70s/it] 28%|██▊ | 144/520 [09:13<23:09, 3.70s/it] {'loss': 1.229, 'grad_norm': 0.0014058466152497761, 'learning_rate': 0.08491184090430365, 'epoch': 0.28} + 
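Every training step in the stream above leaves one `{'loss': ..., 'grad_norm': ..., 'learning_rate': ..., 'epoch': ...}` record, even where tqdm's carriage-return redraws pack several records onto one physical line. A hedged sketch for scraping those records out of a log like this one for plotting; the file paths are placeholders:

```python
# Sketch: extract the per-step metric dicts from this log into a CSV.
# "train.log" is a stand-in path; the dict shape matches the records above.
import ast
import csv
import re

DICT_RE = re.compile(r"\{'loss':.*?\}")  # records open with 'loss' and contain no nested braces

rows = []
with open("train.log", encoding="utf-8") as fh:
    for line in fh:
        # tqdm redraws can leave several records on one physical line,
        # so collect every match rather than one per line.
        rows.extend(ast.literal_eval(m) for m in DICT_RE.findall(line))

with open("metrics.csv", "w", newline="") as out:
    writer = csv.DictWriter(out, fieldnames=["epoch", "loss", "grad_norm", "learning_rate"])
    writer.writeheader()
    writer.writerows(rows)

print(f"extracted {len(rows)} steps")
```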
28%|██▊ | 144/520 [09:13<23:09, 3.70s/it] 28%|██▊ | 145/520 [09:17<23:04, 3.69s/it] {'loss': 1.1502, 'grad_norm': 0.0011933641545215285, 'learning_rate': 0.08468805280142709, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:17<23:04, 3.69s/it] 28%|██▊ | 146/520 [09:21<23:02, 3.70s/it] {'loss': 1.2817, 'grad_norm': 0.0012693491325565332, 'learning_rate': 0.08446291692419736, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:21<23:02, 3.70s/it] 28%|██▊ | 147/520 [09:24<23:08, 3.72s/it] {'loss': 1.1965, 'grad_norm': 0.0013369402488016206, 'learning_rate': 0.08423644202007469, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:24<23:08, 3.72s/it] 28%|██▊ | 148/520 [09:28<23:10, 3.74s/it] {'loss': 1.2185, 'grad_norm': 0.0012062142940372793, 'learning_rate': 0.08400863688854597, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:28<23:10, 3.74s/it] 29%|██▊ | 149/520 [09:32<22:59, 3.72s/it] {'loss': 1.1608, 'grad_norm': 0.0012470957925102947, 'learning_rate': 0.08377951038078302, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:32<22:59, 3.72s/it] 29%|██▉ | 150/520 [09:36<22:52, 3.71s/it] {'loss': 1.3746, 'grad_norm': 0.0012712288668928202, 'learning_rate': 0.08354907139929851, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:36<22:52, 3.71s/it] 29%|██▉ | 151/520 [09:39<22:56, 3.73s/it] {'loss': 1.2113, 'grad_norm': 0.0013289176441860053, 'learning_rate': 0.0833173288976002, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:39<22:56, 3.73s/it] 29%|██▉ | 152/520 [09:43<22:49, 3.72s/it] {'loss': 1.1799, 'grad_norm': 0.0013252547069417967, 'learning_rate': 0.08308429187984298, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:43<22:49, 3.72s/it] 29%|██▉ | 153/520 [09:47<22:48, 3.73s/it] {'loss': 1.209, 'grad_norm': 0.0012505327954666612, 'learning_rate': 0.08284996940047903, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:47<22:48, 3.73s/it] 30%|██▉ | 154/520 [09:50<22:38, 3.71s/it] {'loss': 1.2919, 'grad_norm': 0.0012039905519921977, 'learning_rate': 0.08261437056390607, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:50<22:38, 3.71s/it] 30%|██▉ | 155/520 [09:54<22:36, 3.72s/it] {'loss': 1.2067, 'grad_norm': 0.0013361758504804844, 'learning_rate': 0.08237750452411352, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:54<22:36, 3.72s/it] 30%|███ | 156/520 [09:58<22:32, 3.72s/it] {'loss': 1.2328, 'grad_norm': 0.0014122585030386075, 'learning_rate': 0.08213938048432697, 'epoch': 0.3} + 30%|███ | 156/520 [09:58<22:32, 3.72s/it] 30%|███ | 157/520 [10:02<22:24, 3.70s/it] {'loss': 1.2741, 'grad_norm': 0.0011962657046720683, 'learning_rate': 0.08190000769665044, 'epoch': 0.3} + 30%|███ | 157/520 [10:02<22:24, 3.70s/it] 30%|███ | 158/520 [10:05<22:36, 3.75s/it] {'loss': 1.2161, 'grad_norm': 0.0012870356376050102, 'learning_rate': 0.081659395461707, 'epoch': 0.3} + 30%|███ | 158/520 [10:05<22:36, 3.75s/it] 31%|███ | 159/520 [10:09<22:54, 3.81s/it] {'loss': 1.263, 'grad_norm': 0.0012875036220554705, 'learning_rate': 0.08141755312827736, 'epoch': 0.31} + 31%|███ | 159/520 [10:09<22:54, 3.81s/it] 31%|███ | 160/520 [10:13<23:10, 3.86s/it] {'loss': 1.2632, 'grad_norm': 0.001307611826631059, 'learning_rate': 0.08117449009293669, 'epoch': 0.31} + 31%|███ | 160/520 [10:13<23:10, 3.86s/it] 31%|███ | 161/520 [10:17<23:14, 3.88s/it] {'loss': 1.2406, 'grad_norm': 0.0012621253318362, 'learning_rate': 0.08093021579968941, 'epoch': 0.31} + 31%|███ | 161/520 [10:17<23:14, 3.88s/it] 31%|███ | 162/520 [10:21<23:15, 3.90s/it] {'loss': 1.2159, 'grad_norm': 0.0011894392225832683, 'learning_rate': 0.08068473973960238, 'epoch': 0.31} + 31%|███ | 162/520 [10:21<23:15, 3.90s/it] 31%|███▏ | 163/520 [10:25<23:21, 3.93s/it] {'loss': 1.1494, 
'grad_norm': 0.0014340968085521698, 'learning_rate': 0.08043807145043604, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:25<23:21, 3.93s/it] 32%|███▏ | 164/520 [10:29<23:16, 3.92s/it] {'loss': 1.1063, 'grad_norm': 0.0012113883627246937, 'learning_rate': 0.08019022051627388, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:29<23:16, 3.92s/it] 32%|███▏ | 165/520 [10:33<23:12, 3.92s/it] {'loss': 1.2642, 'grad_norm': 0.001227534650091037, 'learning_rate': 0.07994119656715003, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:33<23:12, 3.92s/it] 32%|███▏ | 166/520 [10:37<23:06, 3.92s/it] {'loss': 1.2233, 'grad_norm': 0.0013822145961531875, 'learning_rate': 0.07969100927867508, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:37<23:06, 3.92s/it] 32%|███▏ | 167/520 [10:41<23:01, 3.91s/it] {'loss': 1.2207, 'grad_norm': 0.0012657625597372066, 'learning_rate': 0.07943966837166024, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:41<23:01, 3.91s/it] 32%|███▏ | 168/520 [10:45<22:57, 3.91s/it] {'loss': 1.162, 'grad_norm': 0.0012370655288635951, 'learning_rate': 0.0791871836117395, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:45<22:57, 3.91s/it] 32%|███▎ | 169/520 [10:49<22:57, 3.92s/it] {'loss': 1.234, 'grad_norm': 0.0012117546927862006, 'learning_rate': 0.0789335648089903, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:49<22:57, 3.92s/it] 33%|███▎ | 170/520 [10:53<23:01, 3.95s/it] {'loss': 1.1763, 'grad_norm': 0.0010571212998027038, 'learning_rate': 0.07867882181755231, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:53<23:01, 3.95s/it] 33%|███▎ | 171/520 [10:57<23:21, 4.02s/it] {'loss': 1.176, 'grad_norm': 0.0013462605828803746, 'learning_rate': 0.07842296453524462, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:57<23:21, 4.02s/it] 33%|███▎ | 172/520 [11:01<23:28, 4.05s/it] {'loss': 1.2493, 'grad_norm': 0.0012614126017645663, 'learning_rate': 0.0781660029031811, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:01<23:28, 4.05s/it] 33%|███▎ | 173/520 [11:05<22:51, 3.95s/it] {'loss': 1.1837, 'grad_norm': 0.0012569661916466632, 'learning_rate': 0.07790794690538422, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:05<22:51, 3.95s/it] 33%|███▎ | 174/520 [11:08<22:26, 3.89s/it] {'loss': 1.233, 'grad_norm': 0.0012879616806212657, 'learning_rate': 0.07764880656839697, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:09<22:26, 3.89s/it] 34%|███▎ | 175/520 [11:12<22:02, 3.83s/it] {'loss': 1.1533, 'grad_norm': 0.0011933041920702673, 'learning_rate': 0.07738859196089358, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:12<22:02, 3.83s/it] 34%|███▍ | 176/520 [11:16<21:45, 3.79s/it] {'loss': 1.2468, 'grad_norm': 0.0012421080917859911, 'learning_rate': 0.07712731319328797, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:16<21:45, 3.79s/it] 34%|███▍ | 177/520 [11:20<21:30, 3.76s/it] {'loss': 1.1301, 'grad_norm': 0.001256878679099175, 'learning_rate': 0.0768649804173412, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:20<21:30, 3.76s/it] 34%|███▍ | 178/520 [11:23<21:20, 3.74s/it] {'loss': 1.2236, 'grad_norm': 0.0013537953550661391, 'learning_rate': 0.07660160382576683, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:23<21:20, 3.74s/it] 34%|███▍ | 179/520 [11:27<21:16, 3.74s/it] {'loss': 1.2945, 'grad_norm': 0.0012196277196757973, 'learning_rate': 0.07633719365183504, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:27<21:16, 3.74s/it] 35%|███▍ | 180/520 [11:31<21:08, 3.73s/it] {'loss': 1.2105, 'grad_norm': 0.0012835178564582022, 'learning_rate': 0.0760717601689749, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:31<21:08, 3.73s/it] 35%|███▍ | 181/520 [11:34<21:06, 3.74s/it] {'loss': 1.1949, 'grad_norm': 0.0011237675064088178, 'learning_rate': 0.07580531369037534, 
'epoch': 0.35} + 35%|███▍ | 181/520 [11:34<21:06, 3.74s/it] 35%|███▌ | 182/520 [11:38<21:00, 3.73s/it] {'loss': 1.2066, 'grad_norm': 0.001331327254564284, 'learning_rate': 0.0755378645685843, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:38<21:00, 3.73s/it] 35%|███▌ | 183/520 [11:42<20:54, 3.72s/it] {'loss': 1.2226, 'grad_norm': 0.0012298632787285267, 'learning_rate': 0.07526942319510654, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:42<20:54, 3.72s/it] 35%|███▌ | 184/520 [11:46<20:49, 3.72s/it] {'loss': 1.1653, 'grad_norm': 0.0013277543554444148, 'learning_rate': 0.07500000000000001, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:46<20:49, 3.72s/it] 36%|███▌ | 185/520 [11:49<20:43, 3.71s/it] {'loss': 1.2896, 'grad_norm': 0.0012336695344854578, 'learning_rate': 0.07472960545147038, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:49<20:43, 3.71s/it] 36%|███▌ | 186/520 [11:53<20:35, 3.70s/it] {'loss': 1.187, 'grad_norm': 0.0013040273546555332, 'learning_rate': 0.07445825005546447, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:53<20:35, 3.70s/it] 36%|███▌ | 187/520 [11:57<20:33, 3.70s/it] {'loss': 1.1815, 'grad_norm': 0.001451268079632221, 'learning_rate': 0.07418594435526199, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:57<20:33, 3.70s/it] 36%|███▌ | 188/520 [12:00<20:29, 3.70s/it] {'loss': 1.2759, 'grad_norm': 0.0013140630219594693, 'learning_rate': 0.07391269893106592, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:00<20:29, 3.70s/it] 36%|███▋ | 189/520 [12:04<20:23, 3.70s/it] {'loss': 1.271, 'grad_norm': 0.0011666043177609104, 'learning_rate': 0.07363852439959136, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:04<20:23, 3.70s/it] 37%|███▋ | 190/520 [12:08<20:16, 3.69s/it] {'loss': 1.1963, 'grad_norm': 0.0013603266204750232, 'learning_rate': 0.0733634314136531, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:08<20:16, 3.69s/it] 37%|███▋ | 191/520 [12:11<20:10, 3.68s/it] {'loss': 1.1593, 'grad_norm': 0.0012215681224136056, 'learning_rate': 0.0730874306617517, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:11<20:10, 3.68s/it] 37%|███▋ | 192/520 [12:15<20:09, 3.69s/it] {'loss': 1.235, 'grad_norm': 0.0012140681443256385, 'learning_rate': 0.07281053286765815, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:15<20:09, 3.69s/it] 37%|███▋ | 193/520 [12:19<20:06, 3.69s/it] {'loss': 1.1826, 'grad_norm': 0.0013447568971956384, 'learning_rate': 0.07253274878999727, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:19<20:06, 3.69s/it] 37%|███▋ | 194/520 [12:22<20:00, 3.68s/it] {'loss': 1.0788, 'grad_norm': 0.001106716529565061, 'learning_rate': 0.07225408922182962, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:22<20:00, 3.68s/it] 38%|███▊ | 195/520 [12:26<20:00, 3.69s/it] {'loss': 1.2506, 'grad_norm': 0.0012404062874993205, 'learning_rate': 0.07197456499023225, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:26<20:00, 3.69s/it] 38%|███▊ | 196/520 [12:30<19:50, 3.67s/it] {'loss': 1.2374, 'grad_norm': 0.0014329496221244472, 'learning_rate': 0.07169418695587791, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:30<19:50, 3.67s/it] 38%|███▊ | 197/520 [12:33<19:46, 3.67s/it] {'loss': 1.1812, 'grad_norm': 0.0012928102303760018, 'learning_rate': 0.07141296601261314, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:33<19:46, 3.67s/it] 38%|███▊ | 198/520 [12:37<19:46, 3.68s/it] {'loss': 1.2471, 'grad_norm': 0.0013724723637289658, 'learning_rate': 0.07113091308703498, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:37<19:46, 3.68s/it] 38%|███▊ | 199/520 [12:41<19:40, 3.68s/it] {'loss': 1.1644, 'grad_norm': 0.0012788272505470303, 'learning_rate': 0.07084803913806641, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:41<19:40, 3.68s/it] 38%|███▊ | 
200/520 [12:45<19:35, 3.67s/it] {'loss': 1.1401, 'grad_norm': 0.0012352245585693913, 'learning_rate': 0.07056435515653059, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:45<19:35, 3.67s/it] 39%|███▊ | 201/520 [12:48<19:46, 3.72s/it] {'loss': 1.157, 'grad_norm': 0.0011083806450386013, 'learning_rate': 0.07027987216472376, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:48<19:46, 3.72s/it] 39%|███▉ | 202/520 [12:52<19:49, 3.74s/it] {'loss': 1.1813, 'grad_norm': 0.0012787984223290878, 'learning_rate': 0.06999460121598704, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:52<19:49, 3.74s/it] 39%|███▉ | 203/520 [12:56<19:51, 3.76s/it] {'loss': 1.2187, 'grad_norm': 0.0012889908865621718, 'learning_rate': 0.06970855339427698, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:56<19:51, 3.76s/it] 39%|███▉ | 204/520 [13:00<19:50, 3.77s/it] {'loss': 1.2346, 'grad_norm': 0.001304165371568663, 'learning_rate': 0.06942173981373474, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:00<19:50, 3.77s/it] 39%|███▉ | 205/520 [13:04<19:48, 3.77s/it] {'loss': 1.1561, 'grad_norm': 0.0011624793523550472, 'learning_rate': 0.0691341716182545, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:04<19:48, 3.77s/it] 40%|███▉ | 206/520 [13:07<19:49, 3.79s/it] {'loss': 1.2579, 'grad_norm': 0.0012153037234954747, 'learning_rate': 0.06884585998105026, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:07<19:49, 3.79s/it] 40%|███▉ | 207/520 [13:11<19:47, 3.79s/it] {'loss': 1.1228, 'grad_norm': 0.0010727403711432559, 'learning_rate': 0.0685568161042219, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:11<19:47, 3.79s/it] 40%|████ | 208/520 [13:15<19:46, 3.80s/it] {'loss': 1.2614, 'grad_norm': 0.0014089559363209847, 'learning_rate': 0.06826705121831976, 'epoch': 0.4} + 40%|████ | 208/520 [13:15<19:46, 3.80s/it] 40%|████ | 209/520 [13:19<19:44, 3.81s/it] {'loss': 1.1757, 'grad_norm': 0.0011963690165770558, 'learning_rate': 0.06797657658190838, 'epoch': 0.4} + 40%|████ | 209/520 [13:19<19:44, 3.81s/it] 40%|████ | 210/520 [13:23<19:38, 3.80s/it] {'loss': 1.2407, 'grad_norm': 0.0012659558671813105, 'learning_rate': 0.06768540348112907, 'epoch': 0.4} + 40%|████ | 210/520 [13:23<19:38, 3.80s/it] 41%|████ | 211/520 [13:26<19:36, 3.81s/it] {'loss': 1.241, 'grad_norm': 0.0011526584165331484, 'learning_rate': 0.06739354322926136, 'epoch': 0.41} + 41%|████ | 211/520 [13:26<19:36, 3.81s/it] 41%|████ | 212/520 [13:30<19:21, 3.77s/it] {'loss': 1.2428, 'grad_norm': 0.0012230081602086565, 'learning_rate': 0.06710100716628345, 'epoch': 0.41} + 41%|████ | 212/520 [13:30<19:21, 3.77s/it] 41%|████ | 213/520 [13:34<19:09, 3.74s/it] {'loss': 1.1929, 'grad_norm': 0.0014117479064896666, 'learning_rate': 0.06680780665843154, 'epoch': 0.41} + 41%|████ | 213/520 [13:34<19:09, 3.74s/it] 41%|████ | 214/520 [13:37<19:00, 3.73s/it] {'loss': 1.1873, 'grad_norm': 0.0012748846871397158, 'learning_rate': 0.06651395309775836, 'epoch': 0.41} + 41%|████ | 214/520 [13:37<19:00, 3.73s/it] 41%|████▏ | 215/520 [13:41<18:53, 3.72s/it] {'loss': 1.0967, 'grad_norm': 0.0011724562948399063, 'learning_rate': 0.06621945790169036, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:41<18:53, 3.72s/it] 42%|████▏ | 216/520 [13:45<18:56, 3.74s/it] {'loss': 1.1076, 'grad_norm': 0.0012377446978693186, 'learning_rate': 0.06592433251258423, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:45<18:56, 3.74s/it] 42%|████▏ | 217/520 [13:49<18:51, 3.73s/it] {'loss': 1.2381, 'grad_norm': 0.0013046457864630675, 'learning_rate': 0.06562858839728224, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:49<18:51, 3.73s/it] 42%|████▏ | 218/520 [13:52<18:44, 3.72s/it] {'loss': 1.2111, 'grad_norm': 
0.0014233952308516614, 'learning_rate': 0.06533223704666673, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:52<18:44, 3.72s/it] 42%|████▏ | 219/520 [13:56<18:46, 3.74s/it] {'loss': 1.2311, 'grad_norm': 0.0011920910401783998, 'learning_rate': 0.06503528997521366, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:56<18:46, 3.74s/it] 42%|████▏ | 220/520 [14:00<18:37, 3.72s/it] {'loss': 1.1426, 'grad_norm': 0.001149706290473159, 'learning_rate': 0.06473775872054521, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:00<18:37, 3.72s/it] 42%|████▎ | 221/520 [14:04<18:29, 3.71s/it] {'loss': 1.222, 'grad_norm': 0.0012558314181697994, 'learning_rate': 0.0644396548429815, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:04<18:29, 3.71s/it] 43%|████▎ | 222/520 [14:07<18:30, 3.73s/it] {'loss': 1.1686, 'grad_norm': 0.0012475331855027237, 'learning_rate': 0.06414098992509137, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:07<18:30, 3.73s/it] 43%|████▎ | 223/520 [14:11<18:37, 3.76s/it] {'loss': 1.1643, 'grad_norm': 0.0012518098759539047, 'learning_rate': 0.06384177557124247, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:11<18:37, 3.76s/it] 43%|████▎ | 224/520 [14:15<18:42, 3.79s/it] {'loss': 1.188, 'grad_norm': 0.0010930793375783282, 'learning_rate': 0.06354202340715026, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:15<18:42, 3.79s/it] 43%|████▎ | 225/520 [14:19<18:44, 3.81s/it] {'loss': 1.1637, 'grad_norm': 0.0012837389117351385, 'learning_rate': 0.06324174507942636, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:19<18:44, 3.81s/it] 43%|████▎ | 226/520 [14:23<18:43, 3.82s/it] {'loss': 1.2631, 'grad_norm': 0.0012286630038779844, 'learning_rate': 0.06294095225512604, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:23<18:43, 3.82s/it] 44%|████▎ | 227/520 [14:27<18:50, 3.86s/it] {'loss': 1.247, 'grad_norm': 0.001205016730327835, 'learning_rate': 0.06263965662129488, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:27<18:50, 3.86s/it] 44%|████▍ | 228/520 [14:31<18:52, 3.88s/it] {'loss': 1.2319, 'grad_norm': 0.0012219992958827312, 'learning_rate': 0.062337869884514674, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:31<18:52, 3.88s/it] 44%|████▍ | 229/520 [14:34<18:47, 3.87s/it] {'loss': 1.2207, 'grad_norm': 0.0011884761481119534, 'learning_rate': 0.06203560377044866, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:34<18:47, 3.87s/it] 44%|████▍ | 230/520 [14:38<18:41, 3.87s/it] {'loss': 1.1153, 'grad_norm': 0.001177884874073229, 'learning_rate': 0.06173287002338577, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:38<18:41, 3.87s/it] 44%|████▍ | 231/520 [14:42<18:34, 3.86s/it] {'loss': 1.1823, 'grad_norm': 0.0011767888145647902, 'learning_rate': 0.06142968040578448, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:42<18:34, 3.86s/it] 45%|████▍ | 232/520 [14:46<18:32, 3.86s/it] {'loss': 1.2696, 'grad_norm': 0.001238246236091714, 'learning_rate': 0.06112604669781572, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:46<18:32, 3.86s/it] 45%|████▍ | 233/520 [14:50<18:34, 3.88s/it] {'loss': 1.1639, 'grad_norm': 0.0012931134073402375, 'learning_rate': 0.06082198069690514, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:50<18:34, 3.88s/it] 45%|████▌ | 234/520 [14:54<18:26, 3.87s/it] {'loss': 1.1408, 'grad_norm': 0.0013084537845834087, 'learning_rate': 0.06051749421727479, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:54<18:26, 3.87s/it] 45%|████▌ | 235/520 [14:58<18:19, 3.86s/it] {'loss': 1.1882, 'grad_norm': 0.0012775000348480292, 'learning_rate': 0.06021259908948402, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:58<18:19, 3.86s/it] 45%|████▌ | 236/520 [15:01<18:02, 3.81s/it] {'loss': 1.2509, 'grad_norm': 0.0013232911633538733, 
'learning_rate': 0.059907307159969884, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:01<18:02, 3.81s/it] 46%|████▌ | 237/520 [15:05<17:56, 3.80s/it] {'loss': 1.2565, 'grad_norm': 0.00123151961949556, 'learning_rate': 0.05960163029058682, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:05<17:56, 3.80s/it] 46%|████▌ | 238/520 [15:09<17:59, 3.83s/it] {'loss': 1.1902, 'grad_norm': 0.001289023959569695, 'learning_rate': 0.05929558035814574, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:09<17:59, 3.83s/it] 46%|████▌ | 239/520 [15:13<17:45, 3.79s/it] {'loss': 1.2531, 'grad_norm': 0.0012864540117486934, 'learning_rate': 0.05898916925395264, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:13<17:45, 3.79s/it] 46%|████▌ | 240/520 [15:16<17:40, 3.79s/it] {'loss': 1.0857, 'grad_norm': 0.0011542774794987866, 'learning_rate': 0.058682408883346526, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:16<17:40, 3.79s/it] 46%|████▋ | 241/520 [15:20<17:44, 3.82s/it] {'loss': 1.1647, 'grad_norm': 0.001241138840745042, 'learning_rate': 0.05837531116523682, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:20<17:44, 3.82s/it] 47%|████▋ | 242/520 [15:24<17:45, 3.83s/it] {'loss': 1.1725, 'grad_norm': 0.0011856226510005403, 'learning_rate': 0.05806788803164034, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:24<17:45, 3.83s/it] 47%|████▋ | 243/520 [15:28<17:46, 3.85s/it] {'loss': 1.1682, 'grad_norm': 0.0012438697905174023, 'learning_rate': 0.057760151427217576, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:28<17:46, 3.85s/it] 47%|████▋ | 244/520 [15:32<17:43, 3.85s/it] {'loss': 1.2658, 'grad_norm': 0.0012268653526531128, 'learning_rate': 0.05745211330880872, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:32<17:43, 3.85s/it] 47%|████▋ | 245/520 [15:36<17:45, 3.88s/it] {'loss': 1.1524, 'grad_norm': 0.00128632664192682, 'learning_rate': 0.057143785644969004, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:36<17:45, 3.88s/it] 47%|████▋ | 246/520 [15:40<17:45, 3.89s/it] {'loss': 1.2651, 'grad_norm': 0.0012488971170708778, 'learning_rate': 0.05683518041550367, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:40<17:45, 3.89s/it] 48%|████▊ | 247/520 [15:44<17:40, 3.88s/it] {'loss': 1.3227, 'grad_norm': 0.0012961789929774804, 'learning_rate': 0.05652630961100259, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:44<17:40, 3.88s/it] 48%|████▊ | 248/520 [15:48<17:40, 3.90s/it] {'loss': 1.1464, 'grad_norm': 0.001280309680748555, 'learning_rate': 0.05621718523237427, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:48<17:40, 3.90s/it] 48%|████▊ | 249/520 [15:51<17:33, 3.89s/it] {'loss': 1.2354, 'grad_norm': 0.001265831344854613, 'learning_rate': 0.05590781929037965, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:51<17:33, 3.89s/it] 48%|████▊ | 250/520 [15:55<17:30, 3.89s/it] {'loss': 1.1858, 'grad_norm': 0.001308477856209416, 'learning_rate': 0.055598223805165395, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:55<17:30, 3.89s/it] 48%|████▊ | 251/520 [15:59<17:26, 3.89s/it] {'loss': 1.2485, 'grad_norm': 0.0011839549529029767, 'learning_rate': 0.0552884108057969, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:59<17:26, 3.89s/it] 48%|████▊ | 252/520 [16:03<17:20, 3.88s/it] {'loss': 1.1691, 'grad_norm': 0.0011550737698529949, 'learning_rate': 0.05497839232979084, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:03<17:20, 3.88s/it] 49%|████▊ | 253/520 [16:07<17:16, 3.88s/it] {'loss': 1.2294, 'grad_norm': 0.0013405193841596649, 'learning_rate': 0.05466818042264753, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:07<17:16, 3.88s/it] 49%|████▉ | 254/520 [16:11<17:11, 3.88s/it] {'loss': 1.1768, 'grad_norm': 0.0011955023474553876, 'learning_rate': 
0.05435778713738292, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:11<17:11, 3.88s/it] 49%|████▉ | 255/520 [16:15<17:09, 3.88s/it] {'loss': 1.1784, 'grad_norm': 0.0013028761945164005, 'learning_rate': 0.05404722453406017, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:15<17:09, 3.88s/it] 49%|████▉ | 256/520 [16:19<17:02, 3.87s/it] {'loss': 1.2344, 'grad_norm': 0.0013159067023227487, 'learning_rate': 0.05373650467932122, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:19<17:02, 3.87s/it] 49%|████▉ | 257/520 [16:22<16:55, 3.86s/it] {'loss': 1.2152, 'grad_norm': 0.0012638030972947595, 'learning_rate': 0.05342563964591784, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:22<16:55, 3.86s/it] 50%|████▉ | 258/520 [16:26<16:51, 3.86s/it] {'loss': 1.2198, 'grad_norm': 0.0010896529070261235, 'learning_rate': 0.053114641512242614, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:26<16:51, 3.86s/it] 50%|████▉ | 259/520 [16:30<16:48, 3.86s/it] {'loss': 1.293, 'grad_norm': 0.001391054325185979, 'learning_rate': 0.05280352236185959, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:30<16:48, 3.86s/it] 50%|█████ | 260/520 [16:34<16:37, 3.84s/it] {'loss': 1.2131, 'grad_norm': 0.000991649645310665, 'learning_rate': 0.05249229428303486, 'epoch': 0.5} + 50%|█████ | 260/520 [16:34<16:37, 3.84s/it] 50%|█████ | 261/520 [16:38<16:36, 3.85s/it] {'loss': 1.1608, 'grad_norm': 0.001207476797966425, 'learning_rate': 0.05218096936826681, 'epoch': 0.5} + 50%|█████ | 261/520 [16:38<16:36, 3.85s/it] 50%|█████ | 262/520 [16:42<16:36, 3.86s/it] {'loss': 1.1647, 'grad_norm': 0.0012747575663490567, 'learning_rate': 0.05186955971381629, 'epoch': 0.5} + 50%|█████ | 262/520 [16:42<16:36, 3.86s/it] 51%|█████ | 263/520 [16:46<16:36, 3.88s/it] {'loss': 1.1865, 'grad_norm': 0.0012107698959396688, 'learning_rate': 0.05155807741923666, 'epoch': 0.51} + 51%|█████ | 263/520 [16:46<16:36, 3.88s/it] 51%|█████ | 264/520 [16:49<16:31, 3.87s/it] {'loss': 1.2449, 'grad_norm': 0.0011886753259684646, 'learning_rate': 0.05124653458690365, 'epoch': 0.51} + 51%|█████ | 264/520 [16:49<16:31, 3.87s/it] 51%|█████ | 265/520 [16:53<16:22, 3.85s/it] {'loss': 1.1715, 'grad_norm': 0.001404279453368601, 'learning_rate': 0.05093494332154511, 'epoch': 0.51} + 51%|█████ | 265/520 [16:53<16:22, 3.85s/it] 51%|█████ | 266/520 [16:57<16:19, 3.86s/it] {'loss': 1.0561, 'grad_norm': 0.0011078390062998528, 'learning_rate': 0.05062331572977076, 'epoch': 0.51} + 51%|█████ | 266/520 [16:57<16:19, 3.86s/it] 51%|█████▏ | 267/520 [17:01<16:18, 3.87s/it] {'loss': 1.1686, 'grad_norm': 0.0011998808347128864, 'learning_rate': 0.05031166391960168, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:01<16:18, 3.87s/it] 52%|█████▏ | 268/520 [17:05<16:12, 3.86s/it] {'loss': 1.2817, 'grad_norm': 0.001211139859847331, 'learning_rate': 0.05, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:05<16:12, 3.86s/it] 52%|█████▏ | 269/520 [17:09<16:08, 3.86s/it] {'loss': 1.2703, 'grad_norm': 0.0013347483992562483, 'learning_rate': 0.049688336080398326, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:09<16:08, 3.86s/it] 52%|█████▏ | 270/520 [17:13<16:02, 3.85s/it] {'loss': 1.1281, 'grad_norm': 0.0012217382485062312, 'learning_rate': 0.04937668427022925, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:13<16:02, 3.85s/it] 52%|█████▏ | 271/520 [17:16<16:00, 3.86s/it] {'loss': 1.2458, 'grad_norm': 0.0012906888624731416, 'learning_rate': 0.0490650566784549, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:16<16:00, 3.86s/it] 52%|█████▏ | 272/520 [17:20<15:58, 3.86s/it] {'loss': 1.1407, 'grad_norm': 0.0011841861082505867, 'learning_rate': 0.048753465413096365, 'epoch': 0.52} + 
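The learning_rate column doubles as a sanity check on the schedule: it ramps linearly to its 0.1 peak over the first 16 steps, then follows a cosine decay, landing on exactly 0.05 at step 268 (visible just above), which is the midpoint of the 504 post-warmup steps. A sketch that reproduces the logged values, with the peak and warmup length read off the log itself rather than the launch flags:

```python
# Sketch: linear warmup + cosine decay, matching the learning_rate values
# logged above. PEAK_LR and WARMUP_STEPS are read off the log itself.
import math

TOTAL_STEPS = 520
WARMUP_STEPS = 16   # lr first reaches its peak at step 16
PEAK_LR = 0.1

def lr_at(step: int) -> float:
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))    # 0.00625, as logged at step 1
print(lr_at(17))   # ~0.0999990286465769, as logged at step 17
print(lr_at(268))  # 0.05 exactly: step 268 is the post-warmup midpoint
```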
52%|█████▏ | 272/520 [17:20<15:58, 3.86s/it] 52%|█████▎ | 273/520 [17:24<15:52, 3.86s/it] {'loss': 1.2401, 'grad_norm': 0.0011820208173194367, 'learning_rate': 0.04844192258076335, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:24<15:52, 3.86s/it] 53%|█████▎ | 274/520 [17:28<15:39, 3.82s/it] {'loss': 1.238, 'grad_norm': 0.0013254444597109287, 'learning_rate': 0.048130440286183726, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:28<15:39, 3.82s/it] 53%|█████▎ | 275/520 [17:32<15:24, 3.77s/it] {'loss': 1.1776, 'grad_norm': 0.001245708067633189, 'learning_rate': 0.047819030631733206, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:32<15:24, 3.77s/it] 53%|█████▎ | 276/520 [17:35<15:17, 3.76s/it] {'loss': 1.2338, 'grad_norm': 0.0013444372615484988, 'learning_rate': 0.04750770571696514, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:35<15:17, 3.76s/it] 53%|█████▎ | 277/520 [17:39<15:07, 3.73s/it] {'loss': 1.2549, 'grad_norm': 0.001135176918661573, 'learning_rate': 0.04719647763814041, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:39<15:07, 3.73s/it] 53%|█████▎ | 278/520 [17:43<15:01, 3.73s/it] {'loss': 1.1397, 'grad_norm': 0.0011384534211141412, 'learning_rate': 0.0468853584877574, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:43<15:01, 3.73s/it] 54%|█████▎ | 279/520 [17:46<14:55, 3.72s/it] {'loss': 1.1314, 'grad_norm': 0.0012660009498990147, 'learning_rate': 0.04657436035408217, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:46<14:55, 3.72s/it] 54%|█████▍ | 280/520 [17:50<14:50, 3.71s/it] {'loss': 1.1674, 'grad_norm': 0.0013471974875586934, 'learning_rate': 0.04626349532067879, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:50<14:50, 3.71s/it] 54%|█████▍ | 281/520 [17:54<14:45, 3.71s/it] {'loss': 1.2694, 'grad_norm': 0.0013053215255271363, 'learning_rate': 0.04595277546593984, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:54<14:45, 3.71s/it] 54%|█████▍ | 282/520 [17:57<14:41, 3.70s/it] {'loss': 1.1502, 'grad_norm': 0.0011884796092459782, 'learning_rate': 0.04564221286261709, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:57<14:41, 3.70s/it] 54%|█████▍ | 283/520 [18:01<14:38, 3.71s/it] {'loss': 1.2799, 'grad_norm': 0.0013488813897457766, 'learning_rate': 0.045331819577352474, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:01<14:38, 3.71s/it] 55%|█████▍ | 284/520 [18:05<14:47, 3.76s/it] {'loss': 1.1397, 'grad_norm': 0.001294405306314203, 'learning_rate': 0.04502160767020918, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:05<14:47, 3.76s/it] 55%|█████▍ | 285/520 [18:09<15:10, 3.88s/it] {'loss': 1.1739, 'grad_norm': 0.0012462196402785264, 'learning_rate': 0.04471158919420312, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:09<15:10, 3.88s/it] 55%|█████▌ | 286/520 [18:13<15:25, 3.96s/it] {'loss': 1.0578, 'grad_norm': 0.0012508336612108214, 'learning_rate': 0.04440177619483461, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:13<15:25, 3.96s/it] 55%|█████▌ | 287/520 [18:17<15:35, 4.02s/it] {'loss': 1.2832, 'grad_norm': 0.0012307840226624415, 'learning_rate': 0.044092180709620364, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:18<15:35, 4.02s/it] 55%|█████▌ | 288/520 [18:22<15:42, 4.06s/it] {'loss': 1.2994, 'grad_norm': 0.001156243239109196, 'learning_rate': 0.04378281476762576, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:22<15:42, 4.06s/it] 56%|█████▌ | 289/520 [18:26<15:45, 4.09s/it] {'loss': 1.1931, 'grad_norm': 0.0011970568452510275, 'learning_rate': 0.043473690388997434, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:26<15:45, 4.09s/it] 56%|█████▌ | 290/520 [18:30<15:34, 4.06s/it] {'loss': 1.1157, 'grad_norm': 0.0011395695005246666, 'learning_rate': 0.04316481958449634, 'epoch': 
0.56} + 56%|█████▌ | 290/520 [18:30<15:34, 4.06s/it] 56%|█████▌ | 291/520 [18:34<15:20, 4.02s/it] {'loss': 1.15, 'grad_norm': 0.0012160060902859386, 'learning_rate': 0.04285621435503101, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:34<15:20, 4.02s/it] 56%|█████▌ | 292/520 [18:38<15:24, 4.05s/it] {'loss': 1.2102, 'grad_norm': 0.0012326560090451728, 'learning_rate': 0.04254788669119128, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:38<15:24, 4.05s/it] 56%|█████▋ | 293/520 [18:42<15:19, 4.05s/it] {'loss': 1.1589, 'grad_norm': 0.001298035925199455, 'learning_rate': 0.04223984857278242, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:42<15:19, 4.05s/it] 57%|█████▋ | 294/520 [18:46<15:07, 4.02s/it] {'loss': 1.1788, 'grad_norm': 0.0013725563438322754, 'learning_rate': 0.041932111968359664, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:46<15:07, 4.02s/it] 57%|█████▋ | 295/520 [18:50<15:04, 4.02s/it] {'loss': 1.1673, 'grad_norm': 0.0011058034791480939, 'learning_rate': 0.04162468883476319, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:50<15:04, 4.02s/it] 57%|█████▋ | 296/520 [18:54<15:05, 4.04s/it] {'loss': 1.1312, 'grad_norm': 0.0012775635702542785, 'learning_rate': 0.041317591116653486, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:54<15:05, 4.04s/it] 57%|█████▋ | 297/520 [18:58<14:57, 4.02s/it] {'loss': 1.258, 'grad_norm': 0.0013253042108247715, 'learning_rate': 0.04101083074604737, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:58<14:57, 4.02s/it] 57%|█████▋ | 298/520 [19:02<14:47, 4.00s/it] {'loss': 1.2202, 'grad_norm': 0.001169348444389335, 'learning_rate': 0.04070441964185428, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:02<14:47, 4.00s/it] 57%|█████▊ | 299/520 [19:06<14:39, 3.98s/it] {'loss': 1.2224, 'grad_norm': 0.001180974264005515, 'learning_rate': 0.0403983697094132, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:06<14:39, 3.98s/it] 58%|█████▊ | 300/520 [19:10<14:19, 3.91s/it] {'loss': 1.2668, 'grad_norm': 0.0012363831360034001, 'learning_rate': 0.040092692840030135, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:10<14:19, 3.91s/it] 58%|█████▊ | 301/520 [19:13<14:00, 3.84s/it] {'loss': 1.2595, 'grad_norm': 0.0012570802311684792, 'learning_rate': 0.039787400910515996, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:13<14:00, 3.84s/it] 58%|█████▊ | 302/520 [19:17<13:51, 3.81s/it] {'loss': 1.2217, 'grad_norm': 0.0011606354194035183, 'learning_rate': 0.03948250578272522, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:17<13:51, 3.81s/it] 58%|█████▊ | 303/520 [19:21<13:38, 3.77s/it] {'loss': 1.176, 'grad_norm': 0.001353752132862736, 'learning_rate': 0.03917801930309486, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:21<13:38, 3.77s/it] 58%|█████▊ | 304/520 [19:24<13:30, 3.75s/it] {'loss': 1.1384, 'grad_norm': 0.001254844853972397, 'learning_rate': 0.03887395330218429, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:24<13:30, 3.75s/it] 59%|█████▊ | 305/520 [19:28<13:25, 3.75s/it] {'loss': 1.2784, 'grad_norm': 0.0014303192735212447, 'learning_rate': 0.03857031959421553, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:28<13:25, 3.75s/it] 59%|█████▉ | 306/520 [19:32<13:29, 3.78s/it] {'loss': 1.2249, 'grad_norm': 0.0012829979215593374, 'learning_rate': 0.03826712997661425, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:32<13:29, 3.78s/it] 59%|█████▉ | 307/520 [19:36<13:51, 3.90s/it] {'loss': 1.1692, 'grad_norm': 0.0011982201603182793, 'learning_rate': 0.03796439622955136, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:36<13:51, 3.90s/it] 59%|█████▉ | 308/520 [19:40<13:33, 3.84s/it] {'loss': 1.2825, 'grad_norm': 0.0011876976571241534, 'learning_rate': 0.03766213011548532, 
'epoch': 0.59} + 59%|█████▉ | 308/520 [19:40<13:33, 3.84s/it] 59%|█████▉ | 309/520 [19:43<13:16, 3.77s/it] {'loss': 1.176, 'grad_norm': 0.00122218239272598, 'learning_rate': 0.03736034337870512, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:43<13:16, 3.77s/it] 60%|█████▉ | 310/520 [19:47<13:08, 3.75s/it] {'loss': 1.1504, 'grad_norm': 0.0012404768537811212, 'learning_rate': 0.03705904774487396, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:47<13:08, 3.75s/it] 60%|█████▉ | 311/520 [19:51<12:59, 3.73s/it] {'loss': 1.1375, 'grad_norm': 0.0012065044094296958, 'learning_rate': 0.036758254920573635, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:51<12:59, 3.73s/it] 60%|██████ | 312/520 [19:55<12:50, 3.70s/it] {'loss': 1.1272, 'grad_norm': 0.0012937188356458337, 'learning_rate': 0.03645797659284975, 'epoch': 0.6} + 60%|██████ | 312/520 [19:55<12:50, 3.70s/it] 60%|██████ | 313/520 [19:58<12:48, 3.71s/it] {'loss': 1.1019, 'grad_norm': 0.0011167784526029394, 'learning_rate': 0.03615822442875754, 'epoch': 0.6} + 60%|██████ | 313/520 [19:58<12:48, 3.71s/it] 60%|██████ | 314/520 [20:02<13:07, 3.82s/it] {'loss': 1.1439, 'grad_norm': 0.001214175084892329, 'learning_rate': 0.035859010074908625, 'epoch': 0.6} + 60%|██████ | 314/520 [20:02<13:07, 3.82s/it] 61%|██████ | 315/520 [20:06<12:54, 3.78s/it] {'loss': 1.1845, 'grad_norm': 0.001327098184689753, 'learning_rate': 0.035560345157018516, 'epoch': 0.61} + 61%|██████ | 315/520 [20:06<12:54, 3.78s/it] 61%|██████ | 316/520 [20:10<13:19, 3.92s/it] {'loss': 1.1315, 'grad_norm': 0.0012782750112958366, 'learning_rate': 0.035262241279454785, 'epoch': 0.61} + 61%|██████ | 316/520 [20:10<13:19, 3.92s/it] 61%|██████ | 317/520 [20:14<13:10, 3.89s/it] {'loss': 1.1322, 'grad_norm': 0.0011078370425452913, 'learning_rate': 0.03496471002478635, 'epoch': 0.61} + 61%|██████ | 317/520 [20:14<13:10, 3.89s/it] 61%|██████ | 318/520 [20:18<12:56, 3.84s/it] {'loss': 1.2407, 'grad_norm': 0.0012959496768081063, 'learning_rate': 0.03466776295333329, 'epoch': 0.61} + 61%|██████ | 318/520 [20:18<12:56, 3.84s/it] 61%|██████▏ | 319/520 [20:22<13:03, 3.90s/it] {'loss': 1.1262, 'grad_norm': 0.00111103204430234, 'learning_rate': 0.03437141160271778, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:22<13:03, 3.90s/it] 62%|██████▏ | 320/520 [20:25<12:46, 3.83s/it] {'loss': 1.0717, 'grad_norm': 0.0012129400269931736, 'learning_rate': 0.034075667487415785, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:26<12:46, 3.83s/it] 62%|██████▏ | 321/520 [20:29<12:37, 3.81s/it] {'loss': 1.2651, 'grad_norm': 0.0012067855145828687, 'learning_rate': 0.033780542098309656, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:29<12:37, 3.81s/it] 62%|██████▏ | 322/520 [20:33<12:33, 3.80s/it] {'loss': 1.0829, 'grad_norm': 0.0011615830851705216, 'learning_rate': 0.03348604690224166, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:33<12:33, 3.80s/it] 62%|██████▏ | 323/520 [20:37<12:25, 3.78s/it] {'loss': 1.158, 'grad_norm': 0.0012394958589230611, 'learning_rate': 0.03319219334156847, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:37<12:25, 3.78s/it] 62%|██████▏ | 324/520 [20:40<12:17, 3.76s/it] {'loss': 1.211, 'grad_norm': 0.0012623690614006076, 'learning_rate': 0.03289899283371657, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:40<12:17, 3.76s/it] 62%|██████▎ | 325/520 [20:44<12:10, 3.75s/it] {'loss': 1.2052, 'grad_norm': 0.001310177441779659, 'learning_rate': 0.03260645677073864, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:44<12:10, 3.75s/it] 63%|██████▎ | 326/520 [20:48<12:11, 3.77s/it] {'loss': 1.204, 'grad_norm': 0.001331075787206125, 'learning_rate': 
0.03231459651887093, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:48<12:11, 3.77s/it] 63%|██████▎ | 327/520 [20:52<12:11, 3.79s/it] {'loss': 1.1867, 'grad_norm': 0.0012550772829789517, 'learning_rate': 0.032023423418091626, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:52<12:11, 3.79s/it] 63%|██████▎ | 328/520 [20:56<12:07, 3.79s/it] {'loss': 1.2448, 'grad_norm': 0.0012798640993222355, 'learning_rate': 0.03173294878168025, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:56<12:07, 3.79s/it] 63%|██████▎ | 329/520 [20:59<11:57, 3.75s/it] {'loss': 1.1282, 'grad_norm': 0.0011111752395575104, 'learning_rate': 0.031443183895778104, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:59<11:57, 3.75s/it] 63%|██████▎ | 330/520 [21:03<11:49, 3.74s/it] {'loss': 1.2065, 'grad_norm': 0.0011566491309545582, 'learning_rate': 0.03115414001894974, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:03<11:49, 3.74s/it] 64%|██████▎ | 331/520 [21:07<11:43, 3.72s/it] {'loss': 1.1639, 'grad_norm': 0.0013072612549243341, 'learning_rate': 0.030865828381745515, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:07<11:43, 3.72s/it] 64%|██████▍ | 332/520 [21:10<11:37, 3.71s/it] {'loss': 1.2151, 'grad_norm': 0.0011245205012375206, 'learning_rate': 0.030578260186265267, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:10<11:37, 3.71s/it] 64%|██████▍ | 333/520 [21:14<11:33, 3.71s/it] {'loss': 1.2989, 'grad_norm': 0.0013196192399112503, 'learning_rate': 0.03029144660572304, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:14<11:33, 3.71s/it] 64%|██████▍ | 334/520 [21:18<11:29, 3.71s/it] {'loss': 1.2085, 'grad_norm': 0.0013206287944875139, 'learning_rate': 0.03000539878401296, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:18<11:29, 3.71s/it] 64%|██████▍ | 335/520 [21:21<11:25, 3.70s/it] {'loss': 1.2089, 'grad_norm': 0.0012019062768157903, 'learning_rate': 0.029720127835276257, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:21<11:25, 3.70s/it] 65%|██████▍ | 336/520 [21:25<11:21, 3.71s/it] {'loss': 1.119, 'grad_norm': 0.0013475651415964947, 'learning_rate': 0.029435644843469434, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:25<11:21, 3.71s/it] 65%|██████▍ | 337/520 [21:29<11:19, 3.71s/it] {'loss': 1.1099, 'grad_norm': 0.001254590548055689, 'learning_rate': 0.029151960861933612, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:29<11:19, 3.71s/it] 65%|██████▌ | 338/520 [21:33<11:13, 3.70s/it] {'loss': 1.2174, 'grad_norm': 0.0012538816808582145, 'learning_rate': 0.02886908691296504, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:33<11:13, 3.70s/it] 65%|██████▌ | 339/520 [21:36<11:13, 3.72s/it] {'loss': 1.1622, 'grad_norm': 0.0013011375911966478, 'learning_rate': 0.028587033987386858, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:36<11:13, 3.72s/it] 65%|██████▌ | 340/520 [21:40<11:10, 3.72s/it] {'loss': 1.1463, 'grad_norm': 0.0011957232836654044, 'learning_rate': 0.028305813044122097, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:40<11:10, 3.72s/it] 66%|██████▌ | 341/520 [21:44<11:04, 3.71s/it] {'loss': 1.1787, 'grad_norm': 0.001322185503770851, 'learning_rate': 0.028025435009767747, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:44<11:04, 3.71s/it] 66%|██████▌ | 342/520 [21:48<11:02, 3.72s/it] {'loss': 1.1937, 'grad_norm': 0.0014200888544519683, 'learning_rate': 0.02774591077817038, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:48<11:02, 3.72s/it] 66%|██████▌ | 343/520 [21:51<10:57, 3.72s/it] {'loss': 1.1435, 'grad_norm': 0.001013295361850821, 'learning_rate': 0.027467251210002732, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:51<10:57, 3.72s/it] 66%|██████▌ | 344/520 [21:55<10:55, 3.73s/it] {'loss': 1.135, 
'grad_norm': 0.001133819987380442, 'learning_rate': 0.02718946713234185, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:55<10:55, 3.73s/it] 66%|██████▋ | 345/520 [21:59<10:50, 3.72s/it] {'loss': 1.232, 'grad_norm': 0.0012578323746604621, 'learning_rate': 0.026912569338248316, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:59<10:50, 3.72s/it] 67%|██████▋ | 346/520 [22:02<10:47, 3.72s/it] {'loss': 1.1619, 'grad_norm': 0.0011970652972537, 'learning_rate': 0.0266365685863469, 'epoch': 0.67} + 67%|██████▋ | 346/520 [22:02<10:47, 3.72s/it] 67%|██████▋ | 347/520 [22:06<10:44, 3.73s/it] {'loss': 1.152, 'grad_norm': 0.0011332854036848025, 'learning_rate': 0.02636147560040866, 'epoch': 0.67} + 67%|██████▋ | 347/520 [22:06<10:44, 3.73s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:10<10:39, 3.72s/it] {'loss': 1.112, 'grad_norm': 0.0014524273233965986, 'learning_rate': 0.026087301068934105, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:10<10:39, 3.72s/it] 67%|██████▋ | 349/520 [22:14<10:32, 3.70s/it] {'loss': 1.1438, 'grad_norm': 0.001244556509960366, 'learning_rate': 0.025814055644738012, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:14<10:32, 3.70s/it] 67%|██████▋ | 350/520 [22:17<10:28, 3.70s/it] {'loss': 1.1895, 'grad_norm': 0.0012812651416942147, 'learning_rate': 0.025541749944535553, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:17<10:28, 3.70s/it] 68%|██████▊ | 351/520 [22:21<10:24, 3.70s/it] {'loss': 1.1012, 'grad_norm': 0.0011732776955844157, 'learning_rate': 0.02527039454852963, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:21<10:24, 3.70s/it] 68%|██████▊ | 352/520 [22:25<10:19, 3.69s/it] {'loss': 1.2121, 'grad_norm': 0.0011618826474714457, 'learning_rate': 0.025000000000000012, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:25<10:19, 3.69s/it] 68%|██████▊ | 353/520 [22:28<10:19, 3.71s/it] {'loss': 1.1362, 'grad_norm': 0.0010195785529034255, 'learning_rate': 0.02473057680489348, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:28<10:19, 3.71s/it] 68%|██████▊ | 354/520 [22:32<10:13, 3.70s/it] {'loss': 1.2276, 'grad_norm': 0.001129868833360024, 'learning_rate': 0.024462135431415732, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:32<10:13, 3.70s/it] 68%|██████▊ | 355/520 [22:36<10:07, 3.68s/it] {'loss': 1.1627, 'grad_norm': 0.0012432634431077062, 'learning_rate': 0.024194686309624666, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:36<10:07, 3.68s/it] 68%|██████▊ | 356/520 [22:39<10:04, 3.69s/it] {'loss': 1.1644, 'grad_norm': 0.0012611809541979825, 'learning_rate': 0.0239282398310251, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:39<10:04, 3.69s/it] 69%|██████▊ | 357/520 [22:43<09:58, 3.67s/it] {'loss': 1.2011, 'grad_norm': 0.0011747385449650618, 'learning_rate': 0.023662806348164964, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:43<09:58, 3.67s/it] 69%|██████▉ | 358/520 [22:47<09:55, 3.68s/it] {'loss': 1.1238, 'grad_norm': 0.0012628621911638912, 'learning_rate': 0.02339839617423318, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:47<09:55, 3.68s/it] 69%|██████▉ | 359/520 [22:50<09:53, 3.68s/it] {'loss': 1.172, 'grad_norm': 0.0012386644480645824, 'learning_rate': 0.023135019582658803, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:50<09:53, 3.68s/it] 69%|██████▉ | 360/520 [22:54<09:48, 3.68s/it] {'loss': 1.1773, 'grad_norm': 0.0011974319385457397, 'learning_rate': 0.022872686806712034, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:54<09:48, 3.68s/it] 69%|██████▉ | 361/520 
[22:58<09:45, 3.68s/it] {'loss': 1.195, 'grad_norm': 0.0010812413555770508, 'learning_rate': 0.02261140803910644, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:58<09:45, 3.68s/it] 70%|██████▉ | 362/520 [23:01<09:44, 3.70s/it] {'loss': 1.1731, 'grad_norm': 0.0013416353301791998, 'learning_rate': 0.02235119343160303, 'epoch': 0.7} + 70%|██████▉ | 362/520 [23:01<09:44, 3.70s/it] 70%|██████▉ | 363/520 [23:05<09:39, 3.69s/it] {'loss': 1.2052, 'grad_norm': 0.0012365664098780665, 'learning_rate': 0.022092053094615812, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:05<09:39, 3.69s/it] 70%|███████ | 364/520 [23:09<09:37, 3.70s/it] {'loss': 1.2096, 'grad_norm': 0.001232504344830143, 'learning_rate': 0.021833997096818897, 'epoch': 0.7} + 70%|███████ | 364/520 [23:09<09:37, 3.70s/it] 70%|███████ | 365/520 [23:13<09:37, 3.72s/it] {'loss': 1.2566, 'grad_norm': 0.001270225477429386, 'learning_rate': 0.021577035464755392, 'epoch': 0.7} + 70%|███████ | 365/520 [23:13<09:37, 3.72s/it] 70%|███████ | 366/520 [23:16<09:34, 3.73s/it] {'loss': 1.2233, 'grad_norm': 0.0012021245558891337, 'learning_rate': 0.02132117818244771, 'epoch': 0.7} + 70%|███████ | 366/520 [23:16<09:34, 3.73s/it] 71%|███████ | 367/520 [23:20<09:28, 3.72s/it] {'loss': 1.2209, 'grad_norm': 0.0012787122731978248, 'learning_rate': 0.021066435191009716, 'epoch': 0.71} + 71%|███████ | 367/520 [23:20<09:28, 3.72s/it] 71%|███████ | 368/520 [23:24<09:23, 3.71s/it] {'loss': 1.0757, 'grad_norm': 0.0012443484845318109, 'learning_rate': 0.02081281638826052, 'epoch': 0.71} + 71%|███████ | 368/520 [23:24<09:23, 3.71s/it] 71%|███████ | 369/520 [23:27<09:21, 3.72s/it] {'loss': 1.173, 'grad_norm': 0.0010936046717393332, 'learning_rate': 0.02056033162833977, 'epoch': 0.71} + 71%|███████ | 369/520 [23:28<09:21, 3.72s/it] 71%|███████ | 370/520 [23:31<09:16, 3.71s/it] {'loss': 1.1361, 'grad_norm': 0.0011710429976621865, 'learning_rate': 0.02030899072132493, 'epoch': 0.71} + 71%|███████ | 370/520 [23:31<09:16, 3.71s/it] 71%|███████▏ | 371/520 [23:35<09:11, 3.70s/it] {'loss': 1.1223, 'grad_norm': 0.00130362338713529, 'learning_rate': 0.02005880343284999, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:35<09:11, 3.70s/it] 72%|███████▏ | 372/520 [23:39<09:09, 3.72s/it] {'loss': 1.2411, 'grad_norm': 0.001090362550908478, 'learning_rate': 0.01980977948372612, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:39<09:09, 3.72s/it] 72%|███████▏ | 373/520 [23:42<09:05, 3.71s/it] {'loss': 1.1292, 'grad_norm': 0.0012951567278391835, 'learning_rate': 0.01956192854956397, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:42<09:05, 3.71s/it] 72%|███████▏ | 374/520 [23:46<09:02, 3.72s/it] {'loss': 1.22, 'grad_norm': 0.001278155292902728, 'learning_rate': 0.019315260260397638, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:46<09:02, 3.72s/it] 72%|███████▏ | 375/520 [23:50<09:02, 3.74s/it] {'loss': 1.1371, 'grad_norm': 0.001265112595119364, 'learning_rate': 0.019069784200310594, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:50<09:02, 3.74s/it] 72%|███████▏ | 376/520 [23:54<08:59, 3.75s/it] {'loss': 1.2399, 'grad_norm': 0.001182376123824078, 'learning_rate': 0.018825509907063328, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:54<08:59, 3.75s/it] 72%|███████▎ | 377/520 [23:57<08:53, 3.73s/it] {'loss': 1.1722, 'grad_norm': 0.0013506426716398195, 'learning_rate': 0.018582446871722638, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:57<08:53, 3.73s/it] 73%|███████▎ | 378/520 [24:01<08:48, 3.72s/it] {'loss': 1.2371, 'grad_norm': 0.0011801029374830462, 'learning_rate': 0.018340604538293014, 'epoch': 0.73} + 73%|███████▎ | 
378/520 [24:01<08:48, 3.72s/it] 73%|███████▎ | 379/520 [24:05<08:42, 3.70s/it] {'loss': 1.2017, 'grad_norm': 0.0011501047359345864, 'learning_rate': 0.018099992303349577, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:05<08:42, 3.70s/it] 73%|███████▎ | 380/520 [24:08<08:36, 3.69s/it] {'loss': 1.2181, 'grad_norm': 0.0012469619078704106, 'learning_rate': 0.017860619515673033, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:08<08:36, 3.69s/it] 73%|███████▎ | 381/520 [24:12<08:34, 3.70s/it] {'loss': 1.2133, 'grad_norm': 0.0011649948109659406, 'learning_rate': 0.017622495475886485, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:12<08:34, 3.70s/it] 73%|███████▎ | 382/520 [24:16<08:32, 3.71s/it] {'loss': 1.1865, 'grad_norm': 0.001116690256549869, 'learning_rate': 0.01738562943609396, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:16<08:32, 3.71s/it] 74%|███████▎ | 383/520 [24:20<08:31, 3.73s/it] {'loss': 1.0581, 'grad_norm': 0.0013436157295288328, 'learning_rate': 0.01715003059952098, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:20<08:31, 3.73s/it] 74%|███████▍ | 384/520 [24:23<08:31, 3.76s/it] {'loss': 1.2128, 'grad_norm': 0.001055560402738259, 'learning_rate': 0.016915708120157042, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:23<08:31, 3.76s/it] 74%|███████▍ | 385/520 [24:27<08:32, 3.80s/it] {'loss': 1.198, 'grad_norm': 0.0011786771343886115, 'learning_rate': 0.016682671102399804, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:27<08:32, 3.80s/it] 74%|███████▍ | 386/520 [24:31<08:32, 3.82s/it] {'loss': 1.148, 'grad_norm': 0.001048920014156604, 'learning_rate': 0.016450928600701503, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:31<08:32, 3.82s/it] 74%|███████▍ | 387/520 [24:35<08:30, 3.84s/it] {'loss': 1.2378, 'grad_norm': 0.0011848431421123701, 'learning_rate': 0.016220489619216988, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:35<08:30, 3.84s/it] 75%|███████▍ | 388/520 [24:39<08:28, 3.85s/it] {'loss': 1.1104, 'grad_norm': 0.001169753842845758, 'learning_rate': 0.01599136311145402, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:39<08:28, 3.85s/it] 75%|███████▍ | 389/520 [24:43<08:24, 3.85s/it] {'loss': 1.1558, 'grad_norm': 0.0015517474796828414, 'learning_rate': 0.015763557979925326, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:43<08:24, 3.85s/it] 75%|███████▌ | 390/520 [24:47<08:22, 3.87s/it] {'loss': 1.2254, 'grad_norm': 0.0011774635879288372, 'learning_rate': 0.015537083075802649, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:47<08:22, 3.87s/it] 75%|███████▌ | 391/520 [24:50<08:18, 3.86s/it] {'loss': 1.2835, 'grad_norm': 0.0012380920109194826, 'learning_rate': 0.015311947198572918, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:50<08:18, 3.86s/it] 75%|███████▌ | 392/520 [24:54<08:10, 3.83s/it] {'loss': 1.1113, 'grad_norm': 0.0012058771670218798, 'learning_rate': 0.015088159095696364, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:54<08:10, 3.83s/it] 76%|███████▌ | 393/520 [24:58<08:01, 3.80s/it] {'loss': 1.1004, 'grad_norm': 0.0010468217724865585, 'learning_rate': 0.014865727462266543, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:58<08:01, 3.80s/it] 76%|███████▌ | 394/520 [25:02<07:52, 3.75s/it] {'loss': 1.1789, 'grad_norm': 0.0012884570893722301, 'learning_rate': 0.014644660940672627, 'epoch': 0.76} + 76%|███████▌ | 394/520 [25:02<07:52, 3.75s/it] 76%|███████▌ | 395/520 [25:05<07:45, 3.72s/it] {'loss': 1.1463, 'grad_norm': 0.0013183120572365667, 'learning_rate': 0.014424968120263504, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:05<07:45, 3.72s/it] 76%|███████▌ | 396/520 [25:09<07:39, 3.71s/it] {'loss': 1.2214, 'grad_norm': 
0.0013124943397670643, 'learning_rate': 0.014206657537014078, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:09<07:39, 3.71s/it] 76%|███████▋ | 397/520 [25:13<07:35, 3.70s/it] {'loss': 1.1955, 'grad_norm': 0.0012135282393324863, 'learning_rate': 0.013989737673193682, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:13<07:35, 3.70s/it] 77%|███████▋ | 398/520 [25:16<07:33, 3.72s/it] {'loss': 1.1924, 'grad_norm': 0.0012990355474729511, 'learning_rate': 0.013774216957036367, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:16<07:33, 3.72s/it] 77%|███████▋ | 399/520 [25:20<07:28, 3.70s/it] {'loss': 1.1302, 'grad_norm': 0.0011622644367961779, 'learning_rate': 0.013560103762413584, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:20<07:28, 3.70s/it] 77%|███████▋ | 400/520 [25:24<07:23, 3.69s/it] {'loss': 1.1633, 'grad_norm': 0.0011154181410736675, 'learning_rate': 0.013347406408508695, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:24<07:23, 3.69s/it] 77%|███████▋ | 401/520 [25:28<07:24, 3.74s/it] {'loss': 1.0376, 'grad_norm': 0.0013124907974505287, 'learning_rate': 0.013136133159493801, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:28<07:24, 3.74s/it] 77%|███████▋ | 402/520 [25:31<07:22, 3.75s/it] {'loss': 1.1623, 'grad_norm': 0.0012712366496513425, 'learning_rate': 0.012926292224208664, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:31<07:22, 3.75s/it] 78%|███████▊ | 403/520 [25:35<07:21, 3.78s/it] {'loss': 1.1845, 'grad_norm': 0.0013457567406305613, 'learning_rate': 0.012717891755841721, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:35<07:21, 3.78s/it] 78%|███████▊ | 404/520 [25:39<07:21, 3.80s/it] {'loss': 1.098, 'grad_norm': 0.001420450804580492, 'learning_rate': 0.012510939851613286, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:39<07:21, 3.80s/it] 78%|███████▊ | 405/520 [25:43<07:18, 3.81s/it] {'loss': 1.1456, 'grad_norm': 0.0011711072617874325, 'learning_rate': 0.01230544455246101, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:43<07:18, 3.81s/it] 78%|███████▊ | 406/520 [25:47<07:14, 3.82s/it] {'loss': 1.0731, 'grad_norm': 0.0014278215638699513, 'learning_rate': 0.012101413842727345, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:47<07:14, 3.82s/it] 78%|███████▊ | 407/520 [25:51<07:11, 3.81s/it] {'loss': 1.2651, 'grad_norm': 0.0012580768919631354, 'learning_rate': 0.01189885564984946, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:51<07:11, 3.81s/it] 78%|███████▊ | 408/520 [25:54<07:08, 3.82s/it] {'loss': 1.179, 'grad_norm': 0.0013538314817124722, 'learning_rate': 0.011697777844051106, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:54<07:08, 3.82s/it] 79%|███████▊ | 409/520 [25:58<07:04, 3.83s/it] {'loss': 1.2955, 'grad_norm': 0.0013114099161412331, 'learning_rate': 0.01149818823803686, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:58<07:04, 3.83s/it] 79%|███████▉ | 410/520 [26:02<07:00, 3.82s/it] {'loss': 1.0368, 'grad_norm': 0.0012470192608465694, 'learning_rate': 0.011300094586688632, 'epoch': 0.79} + 79%|███████▉ | 410/520 [26:02<07:00, 3.82s/it] 79%|███████▉ | 411/520 [26:06<06:57, 3.83s/it] {'loss': 1.2749, 'grad_norm': 0.0013251537345114674, 'learning_rate': 0.011103504586764262, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:06<06:57, 3.83s/it] 79%|███████▉ | 412/520 [26:10<06:51, 3.81s/it] {'loss': 1.1818, 'grad_norm': 0.0012175283814183705, 'learning_rate': 0.01090842587659851, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:10<06:51, 3.81s/it] 79%|███████▉ | 413/520 [26:13<06:47, 3.81s/it] {'loss': 1.1614, 'grad_norm': 0.0011654651482733436, 'learning_rate': 0.010714866035806327, 'epoch': 0.79} + 79%|███████▉ | 413/520 
[26:13<06:47, 3.81s/it] 80%|███████▉ | 414/520 [26:17<06:45, 3.83s/it] {'loss': 0.9727, 'grad_norm': 0.0010039101469299593, 'learning_rate': 0.010522832584988234, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:17<06:45, 3.83s/it] 80%|███████▉ | 415/520 [26:21<06:42, 3.84s/it] {'loss': 1.1664, 'grad_norm': 0.0011669554354200521, 'learning_rate': 0.010332332985438248, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:21<06:42, 3.84s/it] 80%|████████ | 416/520 [26:25<06:38, 3.83s/it] {'loss': 1.0705, 'grad_norm': 0.0013316087128016846, 'learning_rate': 0.010143374638853892, 'epoch': 0.8} + 80%|████████ | 416/520 [26:25<06:38, 3.83s/it] 80%|████████ | 417/520 [26:29<06:36, 3.85s/it] {'loss': 1.233, 'grad_norm': 0.0012275034877502204, 'learning_rate': 0.009955964887048608, 'epoch': 0.8} + 80%|████████ | 417/520 [26:29<06:36, 3.85s/it] 80%|████████ | 418/520 [26:33<06:30, 3.82s/it] {'loss': 1.2276, 'grad_norm': 0.001149608580236, 'learning_rate': 0.009770111011666582, 'epoch': 0.8} + 80%|████████ | 418/520 [26:33<06:30, 3.82s/it] 81%|████████ | 419/520 [26:36<06:23, 3.80s/it] {'loss': 1.2185, 'grad_norm': 0.0013484828637035959, 'learning_rate': 0.00958582023389974, 'epoch': 0.81} + 81%|████████ | 419/520 [26:36<06:23, 3.80s/it] 81%|████████ | 420/520 [26:40<06:16, 3.77s/it] {'loss': 1.111, 'grad_norm': 0.001287849528149616, 'learning_rate': 0.009403099714207176, 'epoch': 0.81} + 81%|████████ | 420/520 [26:40<06:16, 3.77s/it] 81%|████████ | 421/520 [26:44<06:10, 3.75s/it] {'loss': 1.049, 'grad_norm': 0.0012790914715820332, 'learning_rate': 0.009221956552036992, 'epoch': 0.81} + 81%|████████ | 421/520 [26:44<06:10, 3.75s/it] 81%|████████ | 422/520 [26:47<06:05, 3.73s/it] {'loss': 1.1721, 'grad_norm': 0.0013057768304299794, 'learning_rate': 0.009042397785550405, 'epoch': 0.81} + 81%|████████ | 422/520 [26:47<06:05, 3.73s/it] 81%|████████▏ | 423/520 [26:51<06:00, 3.72s/it] {'loss': 1.1374, 'grad_norm': 0.001325165653790828, 'learning_rate': 0.008864430391348333, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:51<06:00, 3.72s/it] 82%|████████▏ | 424/520 [26:55<05:55, 3.71s/it] {'loss': 1.2464, 'grad_norm': 0.001131354309361885, 'learning_rate': 0.008688061284200266, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:55<05:55, 3.71s/it] 82%|████████▏ | 425/520 [26:59<05:52, 3.71s/it] {'loss': 1.1592, 'grad_norm': 0.0012496595878428423, 'learning_rate': 0.008513297316775626, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:59<05:52, 3.71s/it] 82%|████████▏ | 426/520 [27:02<05:46, 3.69s/it] {'loss': 1.1929, 'grad_norm': 0.0016380479600963222, 'learning_rate': 0.00834014527937756, 'epoch': 0.82} + 82%|████████▏ | 426/520 [27:02<05:46, 3.69s/it] 82%|████████▏ | 427/520 [27:06<05:43, 3.69s/it] {'loss': 1.0898, 'grad_norm': 0.001190890187457732, 'learning_rate': 0.008168611899679013, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:06<05:43, 3.69s/it] 82%|████████▏ | 428/520 [27:10<05:39, 3.69s/it] {'loss': 1.0832, 'grad_norm': 0.001346415077636939, 'learning_rate': 0.00799870384246143, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:10<05:39, 3.69s/it] 82%|████████▎ | 429/520 [27:13<05:36, 3.70s/it] {'loss': 1.1794, 'grad_norm': 0.0012411271491725255, 'learning_rate': 0.007830427709355726, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:13<05:36, 3.70s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:17<05:33, 3.70s/it] {'loss': 1.1785, 'grad_norm': 0.0011824950147063884, 'learning_rate': 0.0076637900385857945, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:17<05:33, 3.70s/it] 83%|████████▎ | 431/520 [27:21<05:28, 3.69s/it] {'loss': 1.1354, 'grad_norm': 0.0011972210649265796, 'learning_rate': 0.007498797304714544, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:21<05:28, 3.69s/it] 83%|████████▎ | 432/520 [27:24<05:25, 3.70s/it] {'loss': 1.0858, 'grad_norm': 0.0012881909024624687, 'learning_rate': 0.00733545591839222, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:24<05:25, 3.70s/it] 83%|████████▎ | 433/520 [27:28<05:25, 3.74s/it] {'loss': 1.2201, 'grad_norm': 0.001213263354009698, 'learning_rate': 0.007173772226107433, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:28<05:25, 3.74s/it] 83%|████████▎ | 434/520 [27:32<05:20, 3.73s/it] {'loss': 0.9746, 'grad_norm': 0.0012914921082082035, 'learning_rate': 0.0070137525099404855, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:32<05:20, 3.73s/it] 84%|████████▎ | 435/520 [27:36<05:15, 3.71s/it] {'loss': 1.2483, 'grad_norm': 0.00133280349069464, 'learning_rate': 0.006855402987319348, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:36<05:15, 3.71s/it] 84%|████████▍ | 436/520 [27:39<05:10, 3.70s/it] {'loss': 1.0621, 'grad_norm': 0.0013055839909131024, 'learning_rate': 0.006698729810778065, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:39<05:10, 3.70s/it] 84%|████████▍ | 437/520 [27:43<05:06, 3.69s/it] {'loss': 1.274, 'grad_norm': 0.0012744085027753614, 'learning_rate': 0.00654373906771768, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:43<05:06, 3.69s/it] 84%|████████▍ | 438/520 [27:47<05:02, 3.69s/it] {'loss': 1.0924, 'grad_norm': 0.0012273470748179406, 'learning_rate': 0.006390436780169734, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:47<05:02, 3.69s/it] 84%|████████▍ | 439/520 [27:50<04:58, 3.69s/it] {'loss': 1.1185, 'grad_norm': 0.0009918347717451247, 'learning_rate': 0.006238828904562316, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:50<04:58, 3.69s/it] 85%|████████▍ | 440/520 [27:54<04:55, 3.69s/it] {'loss': 1.1298, 'grad_norm': 0.0012869029705720888, 'learning_rate': 0.006088921331488567, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:54<04:55, 3.69s/it] 85%|████████▍ | 441/520 [27:58<04:52, 3.70s/it] {'loss': 1.1324, 'grad_norm': 0.001223353965968551, 'learning_rate': 0.00594071988547788, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:58<04:52, 3.70s/it] 85%|████████▌ | 442/520 [28:01<04:47, 3.69s/it] {'loss': 1.1914, 'grad_norm': 0.0013787544781191045, 'learning_rate': 0.005794230324769518, 'epoch': 0.85} + 85%|████████▌ | 442/520 [28:01<04:47, 3.69s/it] 85%|████████▌ | 443/520 [28:05<04:45, 3.71s/it] {'loss': 1.2001, 'grad_norm': 0.0012116267943254424, 'learning_rate': 0.0056494583410889145, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:05<04:45, 3.71s/it] 85%|████████▌ | 444/520 [28:09<04:42, 3.71s/it] {'loss': 1.1698, 'grad_norm': 0.0011098340180279622, 'learning_rate': 0.005506409559426573, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:09<04:42, 3.71s/it] 86%|████████▌ | 445/520 [28:13<04:37, 3.70s/it] {'loss': 1.0973, 'grad_norm': 0.0011826462036013455, 'learning_rate': 0.005365089537819435, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:13<04:37, 3.70s/it] 86%|████████▌ | 446/520 [28:16<04:33, 3.70s/it] {'loss': 1.2084, 'grad_norm': 0.001093048822232073, 'learning_rate': 0.005225503767134954, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:16<04:33, 3.70s/it] 
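[Editor's note] The learning_rate column scrolling past here follows the cosine-with-warmup schedule requested on each launch command (--lr_scheduler_type cosine, --warmup_ratio 0.03) over this run's 520 optimizer steps. A minimal sketch of that schedule, assuming the 1e-1 base rate named in this run's completion banner below and HF Transformers' usual ceil-rounded warmup (this is not the training code itself):

```python
import math

def cosine_lr(step: int, base_lr: float = 1e-1, total_steps: int = 520,
              warmup_ratio: float = 0.03) -> float:
    """Linear warmup then half-cosine decay, mirroring transformers'
    get_cosine_schedule_with_warmup; ceil(0.03 * 520) = 16 warmup steps."""
    warmup_steps = math.ceil(total_steps * warmup_ratio)
    if step < warmup_steps:
        return base_lr * step / max(1, warmup_steps)
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    return base_lr * max(0.0, 0.5 * (1.0 + math.cos(math.pi * progress)))

# Spot checks against values logged in this file:
assert abs(cosine_lr(352) - 0.025000000000000012) < 1e-12  # step 352/520
assert abs(cosine_lr(436) - 0.006698729810778065) < 1e-12  # step 436/520
assert cosine_lr(520) == 0.0                               # final step
```

The step count itself is consistent with the launch arguments: 8 ranks x 4 per-device batch x 4 gradient-accumulation steps = 128 samples per optimizer step, and with train_data_ratio 0.1 of the ~665K-example llava_v1_5_mix665k set that yields the 520 steps shown in these progress bars.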
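Because the Trainer only emits these metrics inline with the tqdm bars, recovering a loss or learning-rate curve from this repository means scraping the dicts back out of the raw text. A small sketch (the file name is a placeholder for any of the logs in logs_oct10/):

```python
import ast
import re

# Each logged optimizer step carries a Python-literal dict such as
# {'loss': 1.176, 'grad_norm': ..., 'learning_rate': ..., 'epoch': 0.59};
# the closing {'train_runtime': ...} summary does not match this prefix.
STEP_RE = re.compile(r"\{'loss':[^}]*\}")

def read_step_metrics(log_path: str) -> list[dict]:
    with open(log_path, encoding="utf-8", errors="replace") as f:
        return [ast.literal_eval(m.group(0)) for m in STEP_RE.finditer(f.read())]

metrics = read_step_metrics("logs_oct10/some_run.log")  # placeholder name
print(len(metrics), metrics[0]["loss"], metrics[-1]["learning_rate"])
```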
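One more aside: every experiment in this collection is spawned through deepspeed.launcher.runner, and the opaque --world_info=eyJsb2NhbGhvc3Q... blob in those commands is just base64-encoded JSON describing the rank layout, which the launcher echoes back as WORLD INFO DICT when the next experiment starts below:

```python
import base64
import json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# -> {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}, i.e. the 8 local GPU ranks
```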
86%|████████▌ | 447/520 [28:20<04:30, 3.70s/it] {'loss': 1.1634, 'grad_norm': 0.0012205143393180786, 'learning_rate': 0.005087657670857799, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:20<04:30, 3.70s/it] 86%|████████▌ | 448/520 [28:24<04:26, 3.70s/it] {'loss': 1.1646, 'grad_norm': 0.0013291774475541215, 'learning_rate': 0.004951556604879049, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:24<04:26, 3.70s/it] 86%|████████▋ | 449/520 [28:27<04:21, 3.69s/it] {'loss': 1.1677, 'grad_norm': 0.0011988424716593512, 'learning_rate': 0.004817205857288176, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:27<04:21, 3.69s/it] 87%|████████▋ | 450/520 [28:31<04:18, 3.69s/it] {'loss': 1.1916, 'grad_norm': 0.0012715684705851373, 'learning_rate': 0.004684610648167504, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:31<04:18, 3.69s/it] 87%|████████▋ | 451/520 [28:35<04:15, 3.70s/it] {'loss': 1.1966, 'grad_norm': 0.0012987134649179548, 'learning_rate': 0.004553776129389453, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:35<04:15, 3.70s/it] 87%|████████▋ | 452/520 [28:38<04:11, 3.70s/it] {'loss': 1.2181, 'grad_norm': 0.0011477228661215355, 'learning_rate': 0.004424707384416343, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:38<04:11, 3.70s/it] 87%|████████▋ | 453/520 [28:42<04:07, 3.69s/it] {'loss': 1.1901, 'grad_norm': 0.0011446218051085959, 'learning_rate': 0.0042974094281028495, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:42<04:07, 3.69s/it] 87%|████████▋ | 454/520 [28:46<04:05, 3.72s/it] {'loss': 1.1029, 'grad_norm': 0.0012313194938231664, 'learning_rate': 0.00417188720650119, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:46<04:05, 3.72s/it] 88%|████████▊ | 455/520 [28:50<04:02, 3.73s/it] {'loss': 1.2424, 'grad_norm': 0.0012501808138084148, 'learning_rate': 0.004048145596668967, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:50<04:02, 3.73s/it] 88%|████████▊ | 456/520 [28:53<03:57, 3.71s/it] {'loss': 1.1764, 'grad_norm': 0.0012595500220638279, 'learning_rate': 0.003926189406479613, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:53<03:57, 3.71s/it] 88%|████████▊ | 457/520 [28:57<03:54, 3.72s/it] {'loss': 1.0753, 'grad_norm': 0.001049164514905811, 'learning_rate': 0.0038060233744356634, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:57<03:54, 3.72s/it] 88%|████████▊ | 458/520 [29:01<03:50, 3.72s/it] {'loss': 1.2927, 'grad_norm': 0.0013412713753385532, 'learning_rate': 0.003687652169484568, 'epoch': 0.88} + 88%|████████▊ | 458/520 [29:01<03:50, 3.72s/it] 88%|████████▊ | 459/520 [29:04<03:46, 3.71s/it] {'loss': 1.2233, 'grad_norm': 0.0012166865300829927, 'learning_rate': 0.0035710803908373225, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:04<03:46, 3.71s/it] 88%|████████▊ | 460/520 [29:08<03:42, 3.71s/it] {'loss': 1.1137, 'grad_norm': 0.0012040144728257787, 'learning_rate': 0.0034563125677897935, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:08<03:42, 3.71s/it] 89%|████████▊ | 461/520 [29:12<03:39, 3.72s/it] {'loss': 1.161, 'grad_norm': 0.0008989060726563035, 'learning_rate': 0.0033433531595466748, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:12<03:39, 3.72s/it] 89%|████████▉ | 462/520 [29:16<03:35, 3.71s/it] {'loss': 1.2555, 'grad_norm': 0.0011701602783329005, 'learning_rate': 0.0032322065550483003, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:16<03:35, 3.71s/it] 89%|████████▉ | 463/520 [29:19<03:32, 3.72s/it] {'loss': 1.089, 'grad_norm': 0.0012805289518837489, 'learning_rate': 0.0031228770728000455, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:19<03:32, 3.72s/it] 89%|████████▉ | 464/520 [29:23<03:30, 3.76s/it] {'loss': 1.2069, 
'grad_norm': 0.0012536443091367138, 'learning_rate': 0.0030153689607045845, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:23<03:30, 3.76s/it] 89%|████████▉ | 465/520 [29:27<03:26, 3.75s/it] {'loss': 1.3098, 'grad_norm': 0.0012859901038750948, 'learning_rate': 0.002909686395896827, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:27<03:26, 3.75s/it] 90%|████████▉ | 466/520 [29:31<03:22, 3.75s/it] {'loss': 1.2081, 'grad_norm': 0.0011317452998755766, 'learning_rate': 0.0028058334845816216, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:31<03:22, 3.75s/it] 90%|████████▉ | 467/520 [29:34<03:18, 3.75s/it] {'loss': 1.1468, 'grad_norm': 0.00113778138820628, 'learning_rate': 0.002703814261874199, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:34<03:18, 3.75s/it] 90%|█████████ | 468/520 [29:38<03:15, 3.75s/it] {'loss': 1.1722, 'grad_norm': 0.0014182161444008484, 'learning_rate': 0.002603632691643415, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:38<03:15, 3.75s/it] 90%|█████████ | 469/520 [29:42<03:10, 3.73s/it] {'loss': 1.2416, 'grad_norm': 0.001336140817746277, 'learning_rate': 0.0025052926663577005, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:42<03:10, 3.73s/it] 90%|█████████ | 470/520 [29:46<03:06, 3.72s/it] {'loss': 1.1139, 'grad_norm': 0.0011514913498942656, 'learning_rate': 0.0024087980069338825, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:46<03:06, 3.72s/it] 91%|█████████ | 471/520 [29:49<03:01, 3.71s/it] {'loss': 1.1424, 'grad_norm': 0.001344634205800149, 'learning_rate': 0.002314152462588659, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:49<03:01, 3.71s/it] 91%|█████████ | 472/520 [29:53<02:58, 3.72s/it] {'loss': 1.1106, 'grad_norm': 0.0011759955389460271, 'learning_rate': 0.0022213597106929607, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:53<02:58, 3.72s/it] 91%|█████████ | 473/520 [29:57<02:55, 3.73s/it] {'loss': 1.183, 'grad_norm': 0.0012818094924471302, 'learning_rate': 0.0021304233566290967, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:57<02:55, 3.73s/it] 91%|█████████ | 474/520 [30:00<02:51, 3.73s/it] {'loss': 1.1798, 'grad_norm': 0.0011446351115151974, 'learning_rate': 0.002041346933650612, 'epoch': 0.91} + 91%|█████████ | 474/520 [30:00<02:51, 3.73s/it] 91%|█████████▏| 475/520 [30:04<02:47, 3.73s/it] {'loss': 1.0955, 'grad_norm': 0.0011409446218894894, 'learning_rate': 0.0019541339027450257, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:04<02:47, 3.73s/it] 92%|█████████▏| 476/520 [30:08<02:43, 3.71s/it] {'loss': 1.1642, 'grad_norm': 0.0012996486946793096, 'learning_rate': 0.0018687876524993985, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:08<02:43, 3.71s/it] 92%|█████████▏| 477/520 [30:12<02:39, 3.70s/it] {'loss': 1.1626, 'grad_norm': 0.0013643168568121093, 'learning_rate': 0.001785311498968617, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:12<02:39, 3.70s/it] 92%|█████████▏| 478/520 [30:15<02:36, 3.72s/it] {'loss': 1.1036, 'grad_norm': 0.0012063461814318637, 'learning_rate': 0.00170370868554659, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:15<02:36, 3.72s/it] 92%|█████████▏| 479/520 [30:19<02:32, 3.72s/it] {'loss': 1.1532, 'grad_norm': 0.0012868006455161573, 'learning_rate': 0.0016239823828401946, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:19<02:32, 3.72s/it] 92%|█████████▏| 480/520 [30:23<02:28, 3.71s/it] {'loss': 1.1674, 'grad_norm': 0.0011372183297470497, 'learning_rate': 0.0015461356885461076, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:23<02:28, 3.71s/it] 92%|█████████▎| 481/520 [30:26<02:25, 3.74s/it] {'loss': 1.1565, 'grad_norm': 0.001079261845367422, 'learning_rate': 
0.0014701716273304523, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:26<02:25, 3.74s/it] 93%|█████████▎| 482/520 [30:30<02:22, 3.75s/it] {'loss': 1.179, 'grad_norm': 0.0011296523979630185, 'learning_rate': 0.0013960931507112752, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:30<02:22, 3.75s/it] 93%|█████████▎| 483/520 [30:34<02:18, 3.74s/it] {'loss': 1.1714, 'grad_norm': 0.0012238420450017544, 'learning_rate': 0.0013239031369438325, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:34<02:18, 3.74s/it] 93%|█████████▎| 484/520 [30:38<02:14, 3.73s/it] {'loss': 1.1824, 'grad_norm': 0.0012764878947215954, 'learning_rate': 0.0012536043909088192, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:38<02:14, 3.73s/it] 93%|█████████▎| 485/520 [30:41<02:10, 3.72s/it] {'loss': 1.1339, 'grad_norm': 0.0012058969779527567, 'learning_rate': 0.001185199644003332, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:41<02:10, 3.72s/it] 93%|█████████▎| 486/520 [30:45<02:06, 3.73s/it] {'loss': 1.252, 'grad_norm': 0.0012735432723826909, 'learning_rate': 0.0011186915540347731, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:45<02:06, 3.73s/it] 94%|█████████▎| 487/520 [30:49<02:02, 3.73s/it] {'loss': 1.117, 'grad_norm': 0.0012000147106422136, 'learning_rate': 0.0010540827051175817, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:49<02:02, 3.73s/it] 94%|█████████▍| 488/520 [30:53<01:58, 3.71s/it] {'loss': 1.0586, 'grad_norm': 0.0012691806446346605, 'learning_rate': 0.0009913756075728088, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:53<01:58, 3.71s/it] 94%|█████████▍| 489/520 [30:56<01:54, 3.70s/it] {'loss': 1.1796, 'grad_norm': 0.0010420541369265252, 'learning_rate': 0.0009305726978306172, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:56<01:54, 3.70s/it] 94%|█████████▍| 490/520 [31:00<01:50, 3.70s/it] {'loss': 1.1777, 'grad_norm': 0.0012523710730799433, 'learning_rate': 0.0008716763383355864, 'epoch': 0.94} + 94%|█████████▍| 490/520 [31:00<01:50, 3.70s/it] 94%|█████████▍| 491/520 [31:04<01:46, 3.68s/it] {'loss': 1.1425, 'grad_norm': 0.0013035103404853074, 'learning_rate': 0.0008146888174549339, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:04<01:46, 3.68s/it] 95%|█████████▍| 492/520 [31:07<01:43, 3.69s/it] {'loss': 1.254, 'grad_norm': 0.0012599301427586595, 'learning_rate': 0.0007596123493895991, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:07<01:43, 3.69s/it] 95%|█████████▍| 493/520 [31:11<01:39, 3.69s/it] {'loss': 1.1737, 'grad_norm': 0.0012792491334092985, 'learning_rate': 0.0007064490740882057, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:11<01:39, 3.69s/it] 95%|█████████▌| 494/520 [31:15<01:36, 3.70s/it] {'loss': 1.1839, 'grad_norm': 0.0011320980319713156, 'learning_rate': 0.0006552010571639456, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:15<01:36, 3.70s/it] 95%|█████████▌| 495/520 [31:18<01:32, 3.68s/it] {'loss': 1.1678, 'grad_norm': 0.0012806734353554047, 'learning_rate': 0.0006058702898142643, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:18<01:32, 3.68s/it] 95%|█████████▌| 496/520 [31:22<01:28, 3.69s/it] {'loss': 1.0952, 'grad_norm': 0.0013116571876803892, 'learning_rate': 0.0005584586887435739, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:22<01:28, 3.69s/it] 96%|█████████▌| 497/520 [31:26<01:25, 3.70s/it] {'loss': 1.1082, 'grad_norm': 0.0010460185664044187, 'learning_rate': 0.0005129680960887006, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:26<01:25, 3.70s/it] 96%|█████████▌| 498/520 [31:29<01:21, 3.69s/it] {'loss': 1.1564, 'grad_norm': 0.0012475706729059945, 'learning_rate': 0.0004694002793473595, 'epoch': 0.96} + 96%|█████████▌| 
498/520 [31:29<01:21, 3.69s/it] 96%|█████████▌| 499/520 [31:33<01:17, 3.71s/it] {'loss': 1.246, 'grad_norm': 0.0011400795348855564, 'learning_rate': 0.0004277569313094809, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:33<01:17, 3.71s/it] 96%|█████████▌| 500/520 [31:37<01:13, 3.70s/it] {'loss': 1.2774, 'grad_norm': 0.0014144345129216082, 'learning_rate': 0.00038803966999139685, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:37<01:13, 3.70s/it] 96%|█████████▋| 501/520 [31:40<01:10, 3.69s/it] {'loss': 1.1481, 'grad_norm': 0.001257376517316448, 'learning_rate': 0.000350250038573019, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:40<01:10, 3.69s/it] 97%|█████████▋| 502/520 [31:44<01:06, 3.69s/it] {'loss': 1.1921, 'grad_norm': 0.0011959487504647937, 'learning_rate': 0.00031438950533786984, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:44<01:06, 3.69s/it] 97%|█████████▋| 503/520 [31:48<01:02, 3.70s/it] {'loss': 1.1361, 'grad_norm': 0.0011850789895452658, 'learning_rate': 0.00028045946361601184, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:48<01:02, 3.70s/it] 97%|█████████▋| 504/520 [31:52<00:59, 3.69s/it] {'loss': 1.1814, 'grad_norm': 0.0014192339785934103, 'learning_rate': 0.0002484612317299295, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:52<00:59, 3.69s/it] 97%|█████████▋| 505/520 [31:55<00:55, 3.69s/it] {'loss': 1.2122, 'grad_norm': 0.001250561780935766, 'learning_rate': 0.00021839605294330934, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:55<00:55, 3.69s/it] 97%|█████████▋| 506/520 [31:59<00:51, 3.70s/it] {'loss': 1.1467, 'grad_norm': 0.0013219852957363078, 'learning_rate': 0.00019026509541272276, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:59<00:51, 3.70s/it] 98%|█████████▊| 507/520 [32:03<00:47, 3.69s/it] {'loss': 1.2799, 'grad_norm': 0.0010984456720136877, 'learning_rate': 0.0001640694521422459, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:03<00:47, 3.69s/it] 98%|█████████▊| 508/520 [32:06<00:44, 3.69s/it] {'loss': 1.2632, 'grad_norm': 0.001277901191117845, 'learning_rate': 0.00013981014094099353, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:06<00:44, 3.69s/it] 98%|█████████▊| 509/520 [32:10<00:40, 3.68s/it] {'loss': 1.2352, 'grad_norm': 0.001191410586207414, 'learning_rate': 0.00011748810438355628, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:10<00:40, 3.68s/it] 98%|█████████▊| 510/520 [32:14<00:36, 3.68s/it] {'loss': 1.1854, 'grad_norm': 0.0012460610581602393, 'learning_rate': 9.710420977340761e-05, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:14<00:36, 3.68s/it] 98%|█████████▊| 511/520 [32:17<00:33, 3.67s/it] {'loss': 1.145, 'grad_norm': 0.001220779163759165, 'learning_rate': 7.865924910916978e-05, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:17<00:33, 3.67s/it] 98%|█████████▊| 512/520 [32:21<00:29, 3.68s/it] {'loss': 1.0399, 'grad_norm': 0.0012039375528744014, 'learning_rate': 6.215393905388278e-05, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:21<00:29, 3.68s/it] 99%|█████████▊| 513/520 [32:25<00:25, 3.68s/it] {'loss': 1.2351, 'grad_norm': 0.0013886818463149605, 'learning_rate': 4.758892090711009e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:25<00:25, 3.68s/it] 99%|█████████▉| 514/520 [32:28<00:22, 3.68s/it] {'loss': 1.203, 'grad_norm': 0.0011415797341581138, 'learning_rate': 3.496476058006959e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:28<00:22, 3.68s/it] 99%|█████████▉| 515/520 [32:32<00:18, 3.68s/it] {'loss': 1.255, 'grad_norm': 0.001462762860795642, 'learning_rate': 2.4281948573617874e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:32<00:18, 3.68s/it] 99%|█████████▉| 516/520 
[32:36<00:14, 3.69s/it] {'loss': 1.171, 'grad_norm': 0.0012387301643707368, 'learning_rate': 1.5540899959187725e-05, 'epoch': 0.99}
+ 99%|█████████▉| 516/520 [32:36<00:14, 3.69s/it] 99%|█████████▉| 517/520 [32:39<00:11, 3.68s/it] {'loss': 1.1787, 'grad_norm': 0.0011544843921431605, 'learning_rate': 8.741954362678772e-06, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:39<00:11, 3.68s/it] 100%|█████████▉| 518/520 [32:43<00:07, 3.66s/it] {'loss': 1.1743, 'grad_norm': 0.00129265221232262, 'learning_rate': 3.885375951256931e-06, 'epoch': 1.0}
+ 100%|█████████▉| 518/520 [32:43<00:07, 3.66s/it] 100%|█████████▉| 519/520 [32:47<00:03, 3.67s/it] {'loss': 1.1532, 'grad_norm': 0.0011975858328013911, 'learning_rate': 9.713534230904043e-07, 'epoch': 1.0}
+ 100%|█████████▉| 519/520 [32:47<00:03, 3.67s/it] 100%|██████████| 520/520 [32:51<00:00, 3.93s/it] {'loss': 1.1422, 'grad_norm': 0.0010613408500776088, 'learning_rate': 0.0, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:51<00:00, 3.93s/it] {'train_runtime': 1971.8089, 'train_samples_per_second': 33.74, 'train_steps_per_second': 0.264, 'train_loss': 1.2260054704088432, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:51<00:00, 3.93s/it] 100%|██████████| 520/520 [32:51<00:00, 3.79s/it]
+[2025-10-10 09:53:56,539] [INFO] [launch.py:348:main] Process 1942333 exits successfully.
+[2025-10-10 09:53:57,541] [INFO] [launch.py:348:main] Process 1942339 exits successfully.
+[2025-10-10 09:53:57,541] [INFO] [launch.py:348:main] Process 1942336 exits successfully.
+[2025-10-10 09:53:57,542] [INFO] [launch.py:348:main] Process 1942335 exits successfully.
+[2025-10-10 09:53:57,542] [INFO] [launch.py:348:main] Process 1942337 exits successfully.
+[2025-10-10 09:53:58,544] [INFO] [launch.py:348:main] Process 1942338 exits successfully.
+[2025-10-10 09:53:58,545] [INFO] [launch.py:348:main] Process 1942334 exits successfully.
+[2025-10-10 09:54:01,549] [INFO] [launch.py:348:main] Process 1942332 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251010_091529.log
+Timestamp: 2025-10-10 09:54:04
+=====================================
diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_055759.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_055759.log
new file mode 100644
index 0000000000000000000000000000000000000000..15fee51d5176467ed210d5a773cd5684a8ce8929
--- /dev/null
+++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_055759.log
@@ -0,0 +1,225 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_055759.log
+Timestamp: 2025-10-10 05:57:59
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import] +[2025-10-10 05:58:01,905] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:04,550] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 05:58:04,552] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 3 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 3 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 05:58:07,202] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:08,240] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 05:58:08,240] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 05:58:08,240] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 05:58:08,240] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 05:58:08,240] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 05:58:08,240] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 05:58:08,240] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 05:58:08,242] [INFO] [launch.py:253:main] process 1777438 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 05:58:08,245] [INFO] [launch.py:253:main] process 
1777439 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 05:58:08,247] [INFO] [launch.py:253:main] process 1777440 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 05:58:08,249] [INFO] [launch.py:253:main] process 1777441 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', 
'--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 05:58:08,251] [INFO] [launch.py:253:main] process 1777442 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 05:58:08,253] [INFO] [launch.py:253:main] 
process 1777443 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 05:58:08,255] [INFO] [launch.py:253:main] process 1777444 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 05:58:08,257] [INFO] [launch.py:253:main] process 1777445 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/tinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', 
'--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 05:58:14,883] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:15,133] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:15,196] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:15,197] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:15,232] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:15,247] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:15,253] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:15,255] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 05:58:15,294] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:58:15,539] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:58:15,605] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:58:15,607] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:58:15,655] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:58:15,658] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:58:15,659] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 05:58:15,659] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 05:58:15,669] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed. 
+ +The above exception was the direct cause of the following exception: + +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module> + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train + model = training_recipe.load(model, model_args) + File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load + model.load_llm(**model_args['llm']) + File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm + self.language_model = self.language_model.from_pretrained( + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained + resolved_config_file = cached_file( + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file + raise EnvironmentError( +OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 398, in cached_file + resolved_file = hf_hub_download( + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 106, in _inner_fn + validate_repo_id(arg_value) + File "/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/utils/_validators.py", line 154, in validate_repo_id + raise HFValidationError( +huggingface_hub.errors.HFValidationError: Repo id must be in the form 'repo_name' or 'namespace/repo_name': '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Use `repo_type` argument if needed. + +The above exception was the direct cause of the following exception: + +Traceback (most recent call last): + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module> + train() + File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 149, in train + model = training_recipe.load(model, model_args) + File "/nfs/ywang29/TinyLLaVA/tinyllava/training_recipe/base.py", line 144, in load + model.load_llm(**model_args['llm']) + File "/nfs/ywang29/TinyLLaVA/tinyllava/model/modeling_tinyllava.py", line 360, in load_llm + self.language_model = self.language_model.from_pretrained( + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/modeling_utils.py", line 3015, in from_pretrained + resolved_config_file = cached_file( + File "/nfs/ywang29/TinyLLaVA/transformers/src/transformers/utils/hub.py", line 462, in cached_file + raise EnvironmentError( +OSError: Incorrect path_or_model_id: '/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model'. Please provide either the path to a local folder or the repo_id of a model on the Hub.
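Both tracebacks above are the same chained failure, printed once per rank: `load_llm` appends `language_model` to the configured `--pretrained_model_path`, the resulting directory `/nfs/ywang29/tinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model` does not exist on the case-sensitive filesystem (the source tree, and apparently the checkpoints, live under `TinyLLaVA` with a capital T), so `from_pretrained` falls through to treating the string as a Hub repo id, and an absolute path can never pass repo-id validation, hence the `HFValidationError` wrapped into the final `OSError`. The relaunch at 06:00:04 below passes the correctly capitalized path and proceeds past model loading. As a minimal sketch of a guard against this failure mode (a hypothetical helper, not part of the TinyLLaVA code), one can fail fast with the real path in the message before `from_pretrained` ever interprets it:

```python
from pathlib import Path

from transformers import AutoModelForCausalLM


def load_local_checkpoint(path: str):
    """Load a checkpoint that must already exist on local disk.

    A missing local path is silently reinterpreted by `from_pretrained`
    as a Hub repo id, which turns a capitalization typo such as
    tinyLLaVA vs. TinyLLaVA into a confusing HFValidationError.
    """
    ckpt = Path(path)
    if not ckpt.is_dir():
        raise FileNotFoundError(
            f"pretrained_model_path is not a local directory: {ckpt} "
            "(check capitalization; the filesystem is case-sensitive)"
        )
    # local_files_only=True keeps the call from ever touching the Hub.
    return AutoModelForCausalLM.from_pretrained(str(ckpt), local_files_only=True)
```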
+[2025-10-10 05:58:50,304] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1777438 +[2025-10-10 05:58:50,689] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1777439 +[2025-10-10 05:58:50,689] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1777440 +[2025-10-10 05:58:51,067] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1777441 diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_060004.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_060004.log new file mode 100644 index 0000000000000000000000000000000000000000..b0caa700c71ba686e757bce535e3ca79cbc24d62 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_060004.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_060004.log +Timestamp: 2025-10-10 06:00:04 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:00:07,090] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:09,756] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:00:09,757] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 3 --weight_decay 0. 
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 3 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:00:12,357] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:13,401] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:00:13,401] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:00:13,401] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:00:13,401] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:00:13,401] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:00:13,401] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:00:13,401] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:00:13,403] [INFO] [launch.py:253:main] process 1781271 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.',
'--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:13,406] [INFO] [launch.py:253:main] process 1781272 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 
'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:13,408] [INFO] [launch.py:253:main] process 1781273 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:13,410] [INFO] [launch.py:253:main] process 1781274 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', 
'--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:13,412] [INFO] [launch.py:253:main] process 1781275 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', 
'--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:13,414] [INFO] [launch.py:253:main] process 1781276 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 
'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:13,417] [INFO] [launch.py:253:main] process 1781277 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:00:13,419] [INFO] [launch.py:253:main] process 1781278 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', 
'--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:00:19,924] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:20,121] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:20,250] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:20,252] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:20,277] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:20,277] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:20,301] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:20,332] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:00:20,337] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:00:20,528] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:00:20,679] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:00:20,680] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:00:20,698] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:00:20,698] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 06:00:20,698] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:00:20,734] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:00:20,752] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+ywang29-vrdb-test1-worker-0:1781271:1781271 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1781271:1781271 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1781271:1781271 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1781271:1781271 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1781271:1781271 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1781271:1781271 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:1781276:1781276 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1781276:1781276 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1781276:1781276 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1781276:1781276 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1781276:1781276 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1781276:1781276 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1781273:1781273 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1781273:1781273 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1781273:1781273 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1781273:1781273 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1781273:1781273 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1781273:1781273 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1781277:1781277 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1781277:1781277 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1781277:1781277 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1781277:1781277 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1781277:1781277 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1781277:1781277 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO NET/IB : No device found.
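`NET/IB : No device found.` together with `NCCL_SOCKET_IFNAME set by environment to eth` means NCCL found no InfiniBand adapter and fell back to plain TCP sockets on `eth0`. A sketch of the relevant environment, which must be in place before any process group is created (the interface prefix is taken from this log; `NCCL_DEBUG=INFO` is what produces the `NCCL INFO` lines seen here):

```python
import os

# Set before NCCL initializes (i.e., before torch.distributed.init_process_group).
os.environ["NCCL_SOCKET_IFNAME"] = "eth"  # interface prefix for bootstrap and socket transport
os.environ["NCCL_DEBUG"] = "INFO"         # emits the NCCL INFO lines captured in this log
```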
+ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1781278:1781278 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1781278:1781278 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781278:1781278 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781278:1781278 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1781278:1781278 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1781278:1781278 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
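The repeated Flash Attention 2.0 warning fires because the weights are materialized on CPU first; the kernels only run on CUDA devices, so the fix is exactly what the message says. A hedged sketch using standard `transformers` loading, not this repo's own loader:

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",  # warns while the weights still sit on CPU
)
model.to("cuda")  # moving to GPU before the first forward pass silences the warning
```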
+ywang29-vrdb-test1-worker-0:1781274:1781274 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1781274:1781274 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781274:1781274 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781274:1781274 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1781274:1781274 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1781274:1781274 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1781272:1781272 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1781272:1781272 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781272:1781272 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781272:1781272 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1781272:1781272 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1781272:1781272 [1] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1781275:1781275 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1781275:1781275 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781275:1781275 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781275:1781275 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1781275:1781275 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1781275:1781275 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO ncclCommInitRank comm 0x5592c8fb1780 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x926992c77dba908b - Init START +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO ncclCommInitRank comm 0x55a5342c2d20 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x926992c77dba908b - Init START +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO ncclCommInitRank comm 0x55a0a0692db0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x926992c77dba908b - Init START +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO ncclCommInitRank comm 0x5599745c1570 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x926992c77dba908b - Init START +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO ncclCommInitRank comm 0x55ff022109a0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x926992c77dba908b - Init START +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO ncclCommInitRank comm 0x5608bb8a6150 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x926992c77dba908b - Init START +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO ncclCommInitRank comm 0x565103306080 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x926992c77dba908b - Init START +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO ncclCommInitRank comm 0x5626c29b2e80 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x926992c77dba908b - Init START +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff 
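The `Setting affinity for GPU n to ff,ffff0000,00ffffff` lines are CPU-affinity bitmasks (comma-separated 32-bit words, as in `/proc`): NCCL pins each GPU's helper threads to the CPUs nearest that GPU. A small decoder written for this note (`cpus_from_mask` is a hypothetical helper, not an NCCL API); decoding this mask yields CPUs 0-23 and 48-71, consistent with a two-socket NUMA split:

```python
def cpus_from_mask(mask: str) -> list[int]:
    """Expand an affinity bitmask like 'ff,ffff0000,00ffffff' into a list of CPU ids."""
    bits = int(mask.replace(",", ""), 16)
    return [i for i in range(bits.bit_length()) if (bits >> i) & 1]

print(cpus_from_mask("ff,ffff0000,00ffffff")[:4])  # lowest CPUs in the mask -> [0, 1, 2, 3]
```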
+ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO comm 0x5626c29b2e80 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO comm 0x5592c8fb1780 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO comm 0x55a5342c2d20 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO comm 0x55a0a0692db0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO comm 0x565103306080 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO comm 0x5608bb8a6150 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO comm 0x5599745c1570 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO comm 0x55ff022109a0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 
4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 
[4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL 
INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL 
INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL 
INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
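The 24 `Channel xx/24 : 0 1 2 3 4 5 6 7` lines earlier, and the `x[x] -> y[y] via P2P/CUMEM/read` connections being made here, wire up rings in which each rank talks to its neighbors `(rank ± 1) % nranks`. A one-liner reproducing the pairs (an illustration of the topology, not NCCL code):

```python
nranks = 8
forward = [(r, (r + 1) % nranks) for r in range(nranks)]   # 0->1, 1->2, ..., 7->0
backward = [(r, (r - 1) % nranks) for r in range(nranks)]  # 1->0, 2->1, ..., 0->7
print(forward)
```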
+ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO 
Connected all rings +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL 
INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 
512 | 512 +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1781278:1782918 [7] NCCL INFO ncclCommInitRank comm 0x565103306080 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x926992c77dba908b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1781275:1782921 [4] NCCL INFO ncclCommInitRank comm 0x55ff022109a0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x926992c77dba908b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1781274:1782919 [3] NCCL INFO ncclCommInitRank comm 0x5626c29b2e80 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x926992c77dba908b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1781271:1782897 [0] NCCL INFO ncclCommInitRank comm 0x55a0a0692db0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x926992c77dba908b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1781276:1782898 [5] NCCL INFO ncclCommInitRank comm 0x5599745c1570 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x926992c77dba908b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1781272:1782920 [1] NCCL INFO ncclCommInitRank comm 0x55a5342c2d20 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x926992c77dba908b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1781277:1782900 [6] NCCL INFO ncclCommInitRank comm 0x5608bb8a6150 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x926992c77dba908b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1781273:1782899 [2] NCCL INFO ncclCommInitRank comm 0x5592c8fb1780 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x926992c77dba908b - Init COMPLETE +[2025-10-10 06:01:04,796] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 
'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 
'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
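The `*.scores` tensors listed in this warning are expected to be absent from the pretrain checkpoint: they are the learnable soft-mask logits that this masktune run attaches to every attention and MLP projection of the language model, so they are freshly initialized when supervised tuning starts. As a rough illustration only — `MaskedLinear`, `init_mean`, and `temperature` below are hypothetical names inferred from the run's soft-mask settings (init mean 1.0, temperature 0.3), not identifiers from the tinyllava codebase — a projection carrying such a `scores` tensor could look like:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Hypothetical sketch of a soft-masked projection layer; the real
# implementation in the repo may differ.
class MaskedLinear(nn.Linear):
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # One logit per weight entry; this is the `scores` tensor the
        # pretrained checkpoint lacks, which triggers the warning above.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        # Soft mask in (0, 1); a lower temperature pushes it toward 0/1.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

Because only the `scores` entries are new, the surrounding pretrained weights still load normally; in this setup the warning is expected rather than a sign of a corrupted checkpoint.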
[The same "Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint ..." warning, ending in "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.", is emitted verbatim by each of the other seven ranks.]
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 06:01:06,518] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
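The `*.scores` tensors flagged as newly initialized in the warning above are not part of the vanilla Qwen2.5 checkpoint: they are per-weight mask logits that the masktune recipe attaches to every attention and MLP projection, and they appear in the module dump below as SupermaskLinearSparsity_SoftForward_Normal layers. As a rough sketch only, reconstructed from the launch flags (--mask_type_* soft, --init_mean_*, --temperature_*) and not taken from the TinyLLaVA sources, such a soft-masked linear layer could look like this:

    import torch
    import torch.nn as nn

    class SoftMaskedLinear(nn.Linear):
        """Illustrative stand-in for the SupermaskLinearSparsity_SoftForward_Normal layers in this log."""

        def __init__(self, in_features, out_features, bias=True,
                     init_mean=3.0, temperature=0.5):
            super().__init__(in_features, out_features, bias=bias)
            self.temperature = temperature
            # Per-weight mask logits: the `.scores` tensors that the pretrained
            # checkpoint does not contain, hence the "newly initialized" warning.
            self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

        def forward(self, x):
            # Soft mask in (0, 1), applied on top of the underlying weight.
            mask = torch.sigmoid(self.scores / self.temperature)
            return nn.functional.linear(x, self.weight * mask, self.bias)

With init_mean 3.0 and temperature 0.5 (the values in this run's flags), the initial mask is sigmoid(3.0 / 0.5) ≈ 0.998 everywhere, so training starts from an essentially unmasked network; the "Pre-training init ... Mean=3.000000" lines further down log exactly these initial score means, and the connector's 3.000005 / 2.999970 suggest a small random perturbation of its scores at init.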
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
+    train()
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train
+    data_module = make_supervised_data_module(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module
+    train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__
+    list_data_dict = json.load(open(data_path, "r"))
+FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json'
+[The traceback above was raised identically by all 8 ranks; the interleaved duplicate copies are not reproduced here.]
+[2025-10-10 06:01:09,480] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1781271 +[2025-10-10 06:01:09,482] [INFO] 
[launch.py:316:sigkill_handler] Killing subprocess 1781272 +[2025-10-10 06:01:09,575] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1781273 +[2025-10-10 06:01:09,576] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1781274 +[2025-10-10 06:01:09,578] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1781275 +[2025-10-10 06:01:09,579] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1781276 +[2025-10-10 06:01:09,580] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1781277 +[2025-10-10 06:01:09,581] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1781278 +[2025-10-10 06:01:09,581] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_060004.log +Timestamp: 2025-10-10 06:01:10 
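The failure above is a data-path problem, not a modeling one: every rank raised the same FileNotFoundError because --data_path pointed under /nfs/ywang29/tinyLLaVA (lowercase) while every checkpoint path in the same command used /nfs/ywang29/TinyLLaVA, which suggests a casing typo in the dataset root; the rerun recorded below switches the dataset to /s3-code/ywang29/datasets/tinyllava instead. Since the JSON is only opened after the full ZeRO-3 model initialization (about three minutes in this log), a pre-flight existence check in the launch script would have failed fast. A hypothetical sketch, not part of the TinyLLaVA scripts:

    import os
    import sys

    # Paths copied from --data_path and --image_folder of the failing command.
    required = [
        "/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json",
        "/nfs/ywang29/tinyLLaVA/dataset",
    ]
    missing = [p for p in required if not os.path.exists(p)]
    if missing:
        sys.exit("aborting before the deepspeed launch; missing inputs: " + ", ".join(missing))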
+===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_063715.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_063715.log new file mode 100644 index 0000000000000000000000000000000000000000..f4154000065876937ce3772548e440dc42c5f9bd --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_063715.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_063715.log +Timestamp: 2025-10-10 06:37:15 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:37:18,290] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:21,372] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:37:21,373] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 3 --weight_decay 0. 
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 3 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:37:23,997] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:25,026] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:37:25,026] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:37:25,026] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:37:25,026] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:37:25,026] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:37:25,026] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:37:25,026] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:37:25,028] [INFO] [launch.py:253:main] process 1808012 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', 
'0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:37:25,030] [INFO] [launch.py:253:main] process 1808013 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', 
'--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:37:25,032] [INFO] [launch.py:253:main] process 1808014 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:37:25,034] [INFO] [launch.py:253:main] process 1808015 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', 
'--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:37:25,036] [INFO] [launch.py:253:main] process 1808016 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', 
'--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:37:25,038] [INFO] [launch.py:253:main] process 1808017 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', 
'--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:37:25,041] [INFO] [launch.py:253:main] process 1808018 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:37:25,043] [INFO] [launch.py:253:main] process 1808019 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 
'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:37:31,660] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:31,899] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:31,905] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:31,918] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:31,947] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:31,947] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:31,947] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:31,950] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:37:32,137] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:37:32,311] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:37:32,312] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:37:32,331] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:37:32,360] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:37:32,360] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 06:37:32,360] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:37:32,362] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 06:37:32,365] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. 
This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1808012:1808012 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808012:1808012 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808012:1808012 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1808012:1808012 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1808012:1808012 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1808012:1808012 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:1808019:1808019 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1808019:1808019 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808019:1808019 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808019:1808019 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1808019:1808019 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1808019:1808019 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1808016:1808016 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1808016:1808016 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808016:1808016 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808016:1808016 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1808016:1808016 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1808016:1808016 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1808014:1808014 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1808014:1808014 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808014:1808014 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808014:1808014 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1808014:1808014 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1808014:1808014 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1808013:1808013 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1808013:1808013 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808013:1808013 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808013:1808013 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1808013:1808013 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1808013:1808013 [1] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:1808018:1808018 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1808018:1808018 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808018:1808018 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808018:1808018 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1808018:1808018 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1808018:1808018 [6] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1808015:1808015 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1808015:1808015 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808015:1808015 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808015:1808015 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1808015:1808015 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1808015:1808015 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1808017:1808017 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1808017:1808017 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808017:1808017 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808017:1808017 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1808017:1808017 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1808017:1808017 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO ncclCommInitRank comm 0x55ea58bb3410 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x988492fbcbf20e0 - Init START +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO ncclCommInitRank comm 0x55ca71ae4c20 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x988492fbcbf20e0 - Init START +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO ncclCommInitRank comm 0x5574a9a2d230 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x988492fbcbf20e0 - Init START +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO ncclCommInitRank comm 0x557f2adfb890 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x988492fbcbf20e0 - Init START +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO ncclCommInitRank comm 0x55a8a6819570 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x988492fbcbf20e0 - Init START +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO ncclCommInitRank comm 0x55a904014c80 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x988492fbcbf20e0 - Init START +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO ncclCommInitRank comm 0x5618b4b76f70 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x988492fbcbf20e0 - Init START +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO ncclCommInitRank comm 0x55c87abc9b20 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x988492fbcbf20e0 - Init START +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff 
+ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO comm 0x5574a9a2d230 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO comm 0x55a8a6819570 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO comm 0x55c87abc9b20 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO comm 0x5618b4b76f70 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO comm 0x55a904014c80 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO comm 0x55ea58bb3410 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO comm 0x55ca71ae4c20 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO comm 0x557f2adfb890 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 
3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 
[9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL 
INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL 
INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL 
INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO 
Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL 
INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL 
INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808019:1809604 
[7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1808014:1809606 [2] NCCL INFO ncclCommInitRank comm 0x55c87abc9b20 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x988492fbcbf20e0 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1808015:1809626 [3] NCCL INFO ncclCommInitRank comm 0x5574a9a2d230 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x988492fbcbf20e0 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808018:1809625 [6] NCCL INFO ncclCommInitRank comm 0x55ca71ae4c20 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x988492fbcbf20e0 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1808013:1809624 [1] NCCL INFO ncclCommInitRank comm 0x55a8a6819570 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x988492fbcbf20e0 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808017:1809627 [5] NCCL INFO ncclCommInitRank comm 0x55ea58bb3410 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x988492fbcbf20e0 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808019:1809604 [7] NCCL INFO ncclCommInitRank comm 0x5618b4b76f70 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x988492fbcbf20e0 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1808016:1809605 [4] NCCL INFO ncclCommInitRank comm 0x557f2adfb890 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x988492fbcbf20e0 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1808012:1809603 [0] NCCL INFO ncclCommInitRank comm 0x55a904014c80 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x988492fbcbf20e0 - Init COMPLETE +[2025-10-10 06:38:16,595] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 
'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 
'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
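Note on the warning above: the per-projection 'scores' keys are not part of the released Qwen2.5 checkpoint; they are extra mask parameters that this masktune ablation attaches to every attention and MLP projection, so Transformers reports them as newly initialized. A minimal sketch of how such learnable soft-mask scores could be wired into a linear projection is given below; the class name SoftMaskLinear and its constructor arguments are illustrative assumptions, not the repo's actual implementation, though init_mean=1.0 and temperature=0.3 mirror the flags in the launch command.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskLinear(nn.Linear):
    """Hypothetical sketch of a linear layer with learnable soft-mask scores.

    sigmoid(scores / temperature) yields a mask in (0, 1) that scales the
    base weight during the forward pass; only the presence of the extra
    'scores' parameter (the keys listed in the warning) is taken from the log.
    """

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # Newly created parameter -> absent from the pretrained checkpoint,
        # hence the "newly initialized" warning for every *.scores key.
        self.scores = nn.Parameter(
            torch.full((out_features, in_features), float(init_mean)))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)

Because the pretrained checkpoint contains no such keys, each data-parallel rank prints the same warning once when it builds its model replica, which is why the message repeats in this log.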
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores',
'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 
'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 
'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 
'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 
'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 
'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 06:38:18,345] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
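The wall of `*.scores` warnings above is expected rather than a loading problem: the masked model classes add one trainable score tensor per linear projection, and those tensors do not exist in the plain pretrained checkpoint, so Transformers initializes them fresh on every rank. The real `SupermaskLinearSparsity_SoftForward_Normal` implementation is not part of this log; the sketch below only illustrates the general soft-supermask pattern, assuming a temperature-scaled sigmoid mask as suggested by the `--mask_type_* soft` and `--temperature_* 0.3` flags in the launch command (all names in the sketch are illustrative):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Illustrative soft-supermask linear layer (not the code from this run).

    `scores` is the extra tensor Transformers reports as "newly initialized":
    it holds one mask logit per weight entry and is the only thing trained.
    """

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # Mask logits, initialized to a constant mean (cf. --init_mean_*).
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature
        # The pretrained weight/bias stay frozen; only the mask adapts.
        self.weight.requires_grad = False
        if self.bias is not None:
            self.bias.requires_grad = False

    def forward(self, x):
        # Soft mask in (0, 1); lower temperature pushes it toward binary.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

Under this pattern the checkpoint loader finds weights and biases but no `scores`, which is exactly why the warning lists one entry per masked q/k/v/o and gate/up/down projection.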
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
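The `Pre-training init` block that follows dumps the mean of every score tensor before the first optimizer step, a quick sanity check that all masks start from the same constant initialization. A minimal, hypothetical way to produce that kind of output (assuming a `model` variable; this loop is not taken from the training code):

```python
# Log the mean of every mask-score tensor before training starts.
for name, param in model.named_parameters():
    if name.endswith(".scores"):
        print(f"Pre-training init {name}: Mean={param.data.mean().item():.6f}")
```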
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-10 06:51:26,412 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
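Both totals in the line above can be reconstructed from figures that appear elsewhere in the log: the subsample is 10% of the 665,298-example mix, and the trainable count is exactly one score entry per weight of the masked projections, using the shapes from the model dump. A quick arithmetic check with those shapes hard-coded:

```python
# Shapes from the model printout: hidden=896, k/v projections to 128,
# MLP intermediate=4864; the connector masks a 1152->896 and an 896->896 layer.
hidden, kv, ffn = 896, 128, 4864
per_layer = (2 * hidden * hidden      # q_proj + o_proj scores: 802816 each
             + 2 * hidden * kv        # k_proj + v_proj scores: 114688 each
             + 3 * hidden * ffn)      # gate/up/down scores: 4358144 each
connector = 1152 * 896 + 896 * 896    # the two masked connector layers
assert 24 * per_layer + connector == 359_661_568  # logged trainable total
assert int(665_298 * 0.1) == 66_529               # logged subsample size
```

The remaining roughly 924M parameters (the frozen transformer weights, embeddings, and the vision tower) account for the gap to the 1,283,756,736 total.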
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 
114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 
parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters 
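The `scores` sizes logged above (and the connector entries just below) follow directly from the model geometry, assuming one mask score per weight element: Qwen2.5-0.5B uses hidden size 896, 2 KV heads × head dim 64 = 128, and MLP intermediate size 4864, and the mlp2x_gelu connector maps SigLIP-so400m's 1152-dim features into the 896-dim LLM space via two Linear layers (`_connector.0` and `_connector.2`). A minimal sketch that reproduces every count; the dimension constants come from the public model configs, not from this log:

```python
# Sketch: verify the logged `scores` parameter counts, assuming one score per
# weight element. Dimensions are taken from the public Qwen2.5-0.5B config
# (hidden_size=896, num_key_value_heads=2, head_dim=64, intermediate_size=4864)
# and SigLIP-so400m (hidden_size=1152); they are assumptions, not log output.
HIDDEN, KV_DIM, INTER, VISION = 896, 2 * 64, 4864, 1152

expected = {
    "self_attn.q_proj.scores": HIDDEN * HIDDEN,        # 802816
    "self_attn.k_proj.scores": KV_DIM * HIDDEN,        # 114688
    "self_attn.v_proj.scores": KV_DIM * HIDDEN,        # 114688
    "self_attn.o_proj.scores": HIDDEN * HIDDEN,        # 802816
    "mlp.gate_proj.scores": INTER * HIDDEN,            # 4358144
    "mlp.up_proj.scores": INTER * HIDDEN,              # 4358144
    "mlp.down_proj.scores": HIDDEN * INTER,            # 4358144
    "connector._connector.0.scores": HIDDEN * VISION,  # 1032192
    "connector._connector.2.scores": HIDDEN * HIDDEN,  # 802816
}
for name, count in expected.items():
    print(f"{name}: {count} parameters")
```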
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:003->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO 
Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 
1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL 
INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL 
INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
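With `NCCL_DEBUG=INFO`, every GPU pair announces each of its 24 channels individually, so a healthy 8-GPU ring produces exactly 24 `Channel XX/0` lines per link, all via `P2P/CUMEM` here. A small sketch for auditing a block like the one above mechanically; the log filename `nccl.log` is hypothetical:

```python
# Sketch: tally "Channel XX/0 : A[a] -> B[b] via <transport>" announcements
# per link in an NCCL_DEBUG=INFO log and flag links missing channels.
# The input path below is hypothetical.
import re
from collections import Counter

pat = re.compile(r"Channel (\d+)/0 : (\d+)\[\d+\] -> (\d+)\[\d+\] via (\S+)")
links = Counter()
with open("nccl.log") as f:
    for line in f:
        m = pat.search(line)
        if m:
            _, src, dst, transport = m.groups()
            links[(int(src), int(dst), transport)] += 1

for (src, dst, transport), n in sorted(links.items()):
    status = "ok" if n == 24 else f"only {n}/24 channels"
    print(f"{src} -> {dst} via {transport}: {status}")
```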
+ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 00/0 : 2[2] -> 
1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL 
INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL 
INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
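All of this ring/tree construction happens inside NCCL communicator initialization, which fires at the first collective of any `torch.distributed` job on the `nccl` backend. A minimal sketch that reproduces this kind of output on a single multi-GPU node; the launch command and environment variable are standard PyTorch/NCCL usage, not specific to this run:

```python
# Sketch: reproduce NCCL INFO init logs with a trivial all-reduce.
# Launch: NCCL_DEBUG=INFO torchrun --nproc_per_node=8 this_script.py
import os
import torch
import torch.distributed as dist

dist.init_process_group(backend="nccl")
local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun
torch.cuda.set_device(local_rank)

# The first collective builds the communicator, emitting the "Trees",
# "Channel", and "ncclCommInitRank ... Init COMPLETE" lines seen above.
x = torch.ones(1, device="cuda")
dist.all_reduce(x)
print(f"rank {dist.get_rank()}: sum = {x.item()}")
dist.destroy_process_group()
```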
+ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1808016:1814883 [4] NCCL INFO ncclCommInitRank comm 0x7fa6f406a5e0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xbcb422b071d46117 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808017:1814879 [5] NCCL INFO ncclCommInitRank comm 0x7facf406b6b0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xbcb422b071d46117 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808015:1814878 [3] NCCL INFO ncclCommInitRank comm 0x7f81e006b460 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xbcb422b071d46117 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808019:1814877 [7] NCCL 
INFO ncclCommInitRank comm 0x7f028c06b460 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xbcb422b071d46117 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808018:1814881 [6] NCCL INFO ncclCommInitRank comm 0x7f4f8806ba10 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xbcb422b071d46117 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808013:1814880 [1] NCCL INFO ncclCommInitRank comm 0x7ff1d806b4d0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xbcb422b071d46117 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808014:1814882 [2] NCCL INFO ncclCommInitRank comm 0x7f51e806b050 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xbcb422b071d46117 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1808012:1814876 [0] NCCL INFO ncclCommInitRank comm 0x7f101006be10 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xbcb422b071d46117 - Init COMPLETE + 0%| | 1/520 [00:14<2:03:46, 14.31s/it] {'loss': 2.0453, 'grad_norm': 0.004834834551695443, 'learning_rate': 0.1875, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:03:46, 14.31s/it] 0%| | 2/520 [00:18<1:10:03, 8.12s/it] {'loss': 2.0549, 'grad_norm': 0.0052494409339355185, 'learning_rate': 0.375, 'epoch': 0.0} + 0%| | 2/520 [00:18<1:10:03, 8.12s/it] 1%| | 3/520 [00:21<52:37, 6.11s/it] {'loss': 1.6735, 'grad_norm': 0.0017184601667851274, 'learning_rate': 0.5625, 'epoch': 0.01} + 1%| | 3/520 [00:21<52:37, 6.11s/it] 1%| | 4/520 [00:25<44:21, 5.16s/it] {'loss': 1.5347, 'grad_norm': 0.0007856282000345006, 'learning_rate': 0.75, 'epoch': 0.01} + 1%| | 4/520 [00:25<44:21, 5.16s/it] 1%| | 5/520 [00:29<39:46, 4.63s/it] {'loss': 1.5582, 'grad_norm': 0.0012848561325144862, 'learning_rate': 0.9375, 'epoch': 0.01} + 1%| | 5/520 [00:29<39:46, 4.63s/it] 1%| | 6/520 [00:33<37:16, 4.35s/it] {'loss': 1.494, 'grad_norm': 0.004339198013941472, 'learning_rate': 1.125, 'epoch': 0.01} + 1%| | 6/520 [00:33<37:16, 4.35s/it] 1%|▏ | 7/520 [00:36<35:42, 4.18s/it] {'loss': 2.2802, 'grad_norm': 0.030873608462719065, 'learning_rate': 1.3125, 'epoch': 0.01} + 1%|▏ | 7/520 [00:36<35:42, 4.18s/it] 2%|▏ | 8/520 [00:41<36:33, 4.28s/it] {'loss': 5.5061, 'grad_norm': 0.1650858908192064, 'learning_rate': 1.5, 'epoch': 0.02} + 2%|▏ | 8/520 [00:41<36:33, 4.28s/it] 2%|▏ | 9/520 [00:45<35:22, 4.15s/it] {'loss': 10.4039, 'grad_norm': 0.6061952288906655, 'learning_rate': 1.6875, 'epoch': 0.02} + 2%|▏ | 9/520 [00:45<35:22, 4.15s/it] 2%|▏ | 10/520 [00:48<34:03, 4.01s/it] {'loss': 6.868, 'grad_norm': 0.19576363306257452, 'learning_rate': 1.875, 'epoch': 0.02} + 2%|▏ | 10/520 [00:48<34:03, 4.01s/it] 2%|▏ | 11/520 [00:52<33:45, 3.98s/it] {'loss': 13.1619, 'grad_norm': 0.9435281199507062, 'learning_rate': 2.0625, 'epoch': 0.02} + 2%|▏ | 11/520 [00:52<33:45, 3.98s/it] 2%|▏ | 12/520 [00:56<33:37, 3.97s/it] {'loss': 10.4023, 'grad_norm': 0.38739256581654413, 'learning_rate': 2.25, 'epoch': 0.02} + 2%|▏ | 12/520 [00:56<33:37, 3.97s/it][2025-10-10 06:52:32,374] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:01<34:32, 4.09s/it] {'loss': 15.6731, 'grad_norm': 1.82240358285546, 'learning_rate': 2.4375, 'epoch': 0.03} + 2%|▎ | 13/520 [01:01<34:32, 4.09s/it] 3%|▎ | 14/520 [01:04<33:24, 3.96s/it] {'loss': 13.2103, 'grad_norm': 0.15074277387742913, 'learning_rate': 2.625, 'epoch': 0.03} + 3%|▎ | 14/520 [01:04<33:24, 3.96s/it] 3%|▎ | 15/520 [01:08<32:33, 3.87s/it] {'loss': 11.3062, 'grad_norm': 0.12287023364520526, 'learning_rate': 2.8125, 'epoch': 0.03} + 3%|▎ | 15/520 [01:08<32:33, 3.87s/it] 3%|▎ | 16/520 [01:12<31:56, 3.80s/it] {'loss': 12.3631, 'grad_norm': 0.07283268858907757, 'learning_rate': 3.0, 'epoch': 0.03} + 3%|▎ | 16/520 [01:12<31:56, 3.80s/it] 3%|▎ | 17/520 [01:15<31:36, 3.77s/it] {'loss': 8.5939, 'grad_norm': 0.01874452997941495, 'learning_rate': 2.9999708593973073, 'epoch': 0.03} + 3%|▎ | 17/520 [01:15<31:36, 3.77s/it] 3%|▎ | 18/520 [01:19<31:17, 3.74s/it] {'loss': 8.6412, 'grad_norm': 0.01840213141736506, 'learning_rate': 2.9998834387214623, 'epoch': 0.03} + 3%|▎ | 18/520 [01:19<31:17, 3.74s/it] 4%|▎ | 19/520 [01:23<31:01, 3.71s/it] {'loss': 8.3589, 'grad_norm': 0.008950922346914282, 'learning_rate': 2.9997377413691195, 'epoch': 0.04} + 4%|▎ | 19/520 [01:23<31:01, 3.71s/it] 4%|▍ | 20/520 [01:26<30:55, 3.71s/it] {'loss': 7.6785, 'grad_norm': 0.013684752586009033, 'learning_rate': 2.9995337730012244, 'epoch': 0.04} + 4%|▍ | 20/520 [01:26<30:55, 3.71s/it] 4%|▍ | 21/520 [01:30<30:49, 3.71s/it] {'loss': 8.0382, 'grad_norm': 0.011452214581355048, 'learning_rate': 2.9992715415427913, 'epoch': 0.04} + 4%|▍ | 21/520 [01:30<30:49, 3.71s/it] 4%|▍ | 22/520 [01:34<30:43, 3.70s/it] {'loss': 6.9687, 'grad_norm': 0.007196141036336213, 'learning_rate': 2.998951057182598, 'epoch': 0.04} + 4%|▍ | 22/520 [01:34<30:43, 3.70s/it] 4%|▍ | 23/520 [01:37<30:38, 3.70s/it] {'loss': 6.714, 'grad_norm': 0.00702260090413866, 'learning_rate': 2.998572332372787, 'epoch': 0.04} + 4%|▍ | 23/520 [01:37<30:38, 3.70s/it] 5%|▍ | 24/520 [01:41<30:34, 3.70s/it] {'loss': 7.2766, 'grad_norm': 0.003973569399987333, 'learning_rate': 2.9981353818283836, 'epoch': 0.05} + 5%|▍ | 24/520 [01:41<30:34, 3.70s/it] 5%|▍ | 25/520 [01:45<30:39, 3.72s/it] {'loss': 6.671, 'grad_norm': 0.006030180384767614, 'learning_rate': 2.9976402225267247, 'epoch': 0.05} + 5%|▍ | 25/520 [01:45<30:39, 3.72s/it] 5%|▌ | 26/520 [01:49<30:31, 3.71s/it] {'loss': 6.6206, 'grad_norm': 0.0030050195792045305, 'learning_rate': 2.997086873706798, 'epoch': 0.05} + 5%|▌ | 26/520 [01:49<30:31, 3.71s/it] 5%|▌ | 27/520 [01:52<30:21, 3.69s/it] {'loss': 6.3363, 'grad_norm': 0.002539328259550928, 'learning_rate': 2.996475356868493, 'epoch': 0.05} + 5%|▌ | 27/520 [01:52<30:21, 3.69s/it] 5%|▌ | 28/520 [01:56<30:24, 3.71s/it] {'loss': 6.1736, 'grad_norm': 0.0023411020575382973, 'learning_rate': 2.99580569577177, 'epoch': 0.05} + 5%|▌ | 28/520 [01:56<30:24, 3.71s/it] 6%|▌ | 29/520 [02:00<30:22, 3.71s/it] {'loss': 6.0935, 'grad_norm': 0.0017757841166319845, 'learning_rate': 2.995077916435733, 'epoch': 0.06} + 6%|▌ | 29/520 [02:00<30:22, 3.71s/it] 6%|▌ | 30/520 [02:03<30:21, 3.72s/it] {'loss': 6.8917, 'grad_norm': 0.0015541385633872362, 'learning_rate': 2.9942920471376184, 'epoch': 0.06} + 6%|▌ | 30/520 [02:03<30:21, 3.72s/it] 6%|▌ | 31/520 [02:07<30:24, 3.73s/it] {'loss': 5.851, 'grad_norm': 0.001636262248044816, 'learning_rate': 2.9934481184117008, 'epoch': 0.06} + 6%|▌ | 
31/520 [02:07<30:24, 3.73s/it] 6%|▌ | 32/520 [02:11<30:39, 3.77s/it] {'loss': 7.6621, 'grad_norm': 0.000977903631180197, 'learning_rate': 2.992546163048102, 'epoch': 0.06} + 6%|▌ | 32/520 [02:11<30:39, 3.77s/it] 6%|▋ | 33/520 [02:15<30:55, 3.81s/it] {'loss': 6.1617, 'grad_norm': 0.0010155972444698157, 'learning_rate': 2.9915862160915196, 'epoch': 0.06} + 6%|▋ | 33/520 [02:15<30:55, 3.81s/it] 7%|▋ | 34/520 [02:19<31:01, 3.83s/it] {'loss': 5.8189, 'grad_norm': 0.0009161402227807398, 'learning_rate': 2.990568314839864, 'epoch': 0.07} + 7%|▋ | 34/520 [02:19<31:01, 3.83s/it] 7%|▋ | 35/520 [02:23<30:40, 3.80s/it] {'loss': 6.0184, 'grad_norm': 0.0006757548120273947, 'learning_rate': 2.989492498842809, 'epoch': 0.07} + 7%|▋ | 35/520 [02:23<30:40, 3.80s/it] 7%|▋ | 36/520 [02:26<30:20, 3.76s/it] {'loss': 6.3233, 'grad_norm': 0.0005342897949090439, 'learning_rate': 2.9883588099002583, 'epoch': 0.07} + 7%|▋ | 36/520 [02:26<30:20, 3.76s/it] 7%|▋ | 37/520 [02:30<30:07, 3.74s/it] {'loss': 6.641, 'grad_norm': 0.0004973989979846121, 'learning_rate': 2.9871672920607155, 'epoch': 0.07} + 7%|▋ | 37/520 [02:30<30:07, 3.74s/it] 7%|▋ | 38/520 [02:34<29:51, 3.72s/it] {'loss': 6.3157, 'grad_norm': 0.0005639828485782039, 'learning_rate': 2.985917991619579, 'epoch': 0.07} + 7%|▋ | 38/520 [02:34<29:51, 3.72s/it] 8%|▊ | 39/520 [02:37<29:40, 3.70s/it] {'loss': 5.8101, 'grad_norm': 0.0006349067744629548, 'learning_rate': 2.984610957117339, 'epoch': 0.07} + 8%|▊ | 39/520 [02:37<29:40, 3.70s/it] 8%|▊ | 40/520 [02:41<29:29, 3.69s/it] {'loss': 5.8423, 'grad_norm': 0.0006949424230716011, 'learning_rate': 2.9832462393376926, 'epoch': 0.08} + 8%|▊ | 40/520 [02:41<29:29, 3.69s/it] 8%|▊ | 41/520 [02:45<29:25, 3.69s/it] {'loss': 5.9219, 'grad_norm': 0.0007365755453551477, 'learning_rate': 2.981823891305572, 'epoch': 0.08} + 8%|▊ | 41/520 [02:45<29:25, 3.69s/it] 8%|▊ | 42/520 [02:48<29:24, 3.69s/it] {'loss': 6.2053, 'grad_norm': 0.0007102014280647583, 'learning_rate': 2.980343968285082, 'epoch': 0.08} + 8%|▊ | 42/520 [02:48<29:24, 3.69s/it] 8%|▊ | 43/520 [02:52<29:19, 3.69s/it] {'loss': 6.6717, 'grad_norm': 0.0005426064434514232, 'learning_rate': 2.978806527777354, 'epoch': 0.08} + 8%|▊ | 43/520 [02:52<29:19, 3.69s/it] 8%|▊ | 44/520 [02:56<29:20, 3.70s/it] {'loss': 6.7829, 'grad_norm': 0.0003869633362060351, 'learning_rate': 2.977211629518312, 'epoch': 0.08} + 8%|▊ | 44/520 [02:56<29:20, 3.70s/it] 9%|▊ | 45/520 [02:59<29:16, 3.70s/it] {'loss': 5.6447, 'grad_norm': 0.0004840488982320683, 'learning_rate': 2.975559335476352, 'epoch': 0.09} + 9%|▊ | 45/520 [02:59<29:16, 3.70s/it] 9%|▉ | 46/520 [03:03<29:09, 3.69s/it] {'loss': 7.1071, 'grad_norm': 0.00037427258006097347, 'learning_rate': 2.9738497098499326, 'epoch': 0.09} + 9%|▉ | 46/520 [03:03<29:09, 3.69s/it] 9%|▉ | 47/520 [03:07<29:04, 3.69s/it] {'loss': 5.9254, 'grad_norm': 0.00040138184916306736, 'learning_rate': 2.972082819065082, 'epoch': 0.09} + 9%|▉ | 47/520 [03:07<29:04, 3.69s/it] 9%|▉ | 48/520 [03:10<28:59, 3.69s/it] {'loss': 5.6713, 'grad_norm': 0.00048372222386857094, 'learning_rate': 2.970258731772816, 'epoch': 0.09} + 9%|▉ | 48/520 [03:10<28:59, 3.69s/it] 9%|▉ | 49/520 [03:14<29:00, 3.70s/it] {'loss': 5.7152, 'grad_norm': 0.00039663966857263585, 'learning_rate': 2.9683775188464727, 'epoch': 0.09} + 9%|▉ | 49/520 [03:14<29:00, 3.70s/it] 10%|▉ | 50/520 [03:18<28:54, 3.69s/it] {'loss': 5.6723, 'grad_norm': 0.0003810799508398198, 'learning_rate': 2.966439253378957, 'epoch': 0.1} + 10%|▉ | 50/520 [03:18<28:54, 3.69s/it] 10%|▉ | 51/520 [03:21<28:47, 3.68s/it] {'loss': 5.4172, 
'grad_norm': 0.00046000737554743464, 'learning_rate': 2.9644440106799, 'epoch': 0.1} + 10%|▉ | 51/520 [03:21<28:47, 3.68s/it] 10%|█ | 52/520 [03:25<28:47, 3.69s/it] {'loss': 5.9727, 'grad_norm': 0.00031661423441705304, 'learning_rate': 2.9623918682727353, 'epoch': 0.1} + 10%|█ | 52/520 [03:25<28:47, 3.69s/it] 10%|█ | 53/520 [03:29<28:44, 3.69s/it] {'loss': 5.9423, 'grad_norm': 0.00033127197810928953, 'learning_rate': 2.9602829058916846, 'epoch': 0.1} + 10%|█ | 53/520 [03:29<28:44, 3.69s/it] 10%|█ | 54/520 [03:33<28:55, 3.72s/it] {'loss': 5.3811, 'grad_norm': 0.00034044167440854466, 'learning_rate': 2.9581172054786617, 'epoch': 0.1} + 10%|█ | 54/520 [03:33<28:55, 3.72s/it] 11%|█ | 55/520 [03:37<29:10, 3.76s/it] {'loss': 5.5807, 'grad_norm': 0.00035500403681282906, 'learning_rate': 2.955894851180086, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<29:10, 3.76s/it] 11%|█ | 56/520 [03:40<29:21, 3.80s/it] {'loss': 5.915, 'grad_norm': 0.0003756794238861492, 'learning_rate': 2.953615929343617, 'epoch': 0.11} + 11%|█ | 56/520 [03:40<29:21, 3.80s/it] 11%|█ | 57/520 [03:44<29:30, 3.82s/it] {'loss': 5.5056, 'grad_norm': 0.0003910217682071923, 'learning_rate': 2.9512805285147943, 'epoch': 0.11} + 11%|█ | 57/520 [03:44<29:30, 3.82s/it] 11%|█ | 58/520 [03:48<29:31, 3.83s/it] {'loss': 5.7875, 'grad_norm': 0.00035691773963804054, 'learning_rate': 2.9488887394336025, 'epoch': 0.11} + 11%|█ | 58/520 [03:48<29:31, 3.83s/it] 11%|█▏ | 59/520 [03:52<29:35, 3.85s/it] {'loss': 6.2315, 'grad_norm': 0.00043767797447056314, 'learning_rate': 2.9464406550309414, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:52<29:35, 3.85s/it] 12%|█▏ | 60/520 [03:56<29:32, 3.85s/it] {'loss': 5.7791, 'grad_norm': 0.00043410677050615586, 'learning_rate': 2.9439363704250177, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:56<29:32, 3.85s/it] 12%|█▏ | 61/520 [04:00<29:01, 3.79s/it] {'loss': 6.8196, 'grad_norm': 0.0003590603583335452, 'learning_rate': 2.9413759829176493, 'epoch': 0.12} + 12%|█▏ | 61/520 [04:00<29:01, 3.79s/it] 12%|█▏ | 62/520 [04:03<28:34, 3.74s/it] {'loss': 5.8135, 'grad_norm': 0.0003132479471501588, 'learning_rate': 2.9387595919904816, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:03<28:34, 3.74s/it] 12%|█▏ | 63/520 [04:07<28:17, 3.71s/it] {'loss': 5.6035, 'grad_norm': 0.0003562910984864389, 'learning_rate': 2.936087299301127, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:07<28:17, 3.71s/it] 12%|█▏ | 64/520 [04:10<28:09, 3.71s/it] {'loss': 5.606, 'grad_norm': 0.00033901493730383644, 'learning_rate': 2.933359208679211, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:10<28:09, 3.71s/it] 12%|█▎ | 65/520 [04:14<28:06, 3.71s/it] {'loss': 5.8511, 'grad_norm': 0.00033443967459376975, 'learning_rate': 2.9305754261223402, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:14<28:06, 3.71s/it] 13%|█▎ | 66/520 [04:18<27:54, 3.69s/it] {'loss': 5.8323, 'grad_norm': 0.0002698128032604843, 'learning_rate': 2.9277360597919837, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:18<27:54, 3.69s/it] 13%|█▎ | 67/520 [04:22<27:46, 3.68s/it] {'loss': 5.383, 'grad_norm': 0.00042835438533101155, 'learning_rate': 2.924841220009269, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:22<27:46, 3.68s/it] 13%|█▎ | 68/520 [04:25<27:43, 3.68s/it] {'loss': 5.2485, 'grad_norm': 0.00039146125511480294, 'learning_rate': 2.9218910192506975, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:25<27:43, 3.68s/it] 13%|█▎ | 69/520 [04:29<27:37, 3.68s/it] {'loss': 5.2631, 'grad_norm': 0.00042262943225013596, 'learning_rate': 2.9188855721437736, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:29<27:37, 3.68s/it] 13%|█▎ | 70/520 [04:33<27:31, 3.67s/it] {'loss': 5.468, 'grad_norm': 
0.00030983357817113545, 'learning_rate': 2.9158249954625513, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:33<27:31, 3.67s/it] 14%|█▎ | 71/520 [04:36<27:28, 3.67s/it] {'loss': 5.333, 'grad_norm': 0.00028157773735116085, 'learning_rate': 2.9127094081230953, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:36<27:28, 3.67s/it] 14%|█▍ | 72/520 [04:40<27:22, 3.67s/it] {'loss': 5.6075, 'grad_norm': 0.00023049181151872807, 'learning_rate': 2.9095389311788624, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:40<27:22, 3.67s/it] 14%|█▍ | 73/520 [04:44<27:21, 3.67s/it] {'loss': 5.1565, 'grad_norm': 0.0003032358852586855, 'learning_rate': 2.9063136878159987, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:44<27:21, 3.67s/it] 14%|█▍ | 74/520 [04:47<27:21, 3.68s/it] {'loss': 5.6376, 'grad_norm': 0.00023118482431945162, 'learning_rate': 2.903033803348551, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:47<27:21, 3.68s/it] 14%|█▍ | 75/520 [04:51<27:11, 3.67s/it] {'loss': 4.9659, 'grad_norm': 0.00031786007226631413, 'learning_rate': 2.8996994052135996, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:51<27:11, 3.67s/it] 15%|█▍ | 76/520 [04:55<27:10, 3.67s/it] {'loss': 6.6199, 'grad_norm': 0.0002159933752876736, 'learning_rate': 2.8963106229663063, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:55<27:10, 3.67s/it] 15%|█▍ | 77/520 [04:58<27:09, 3.68s/it] {'loss': 5.2644, 'grad_norm': 0.00032752305511126264, 'learning_rate': 2.89286758827488, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:58<27:09, 3.68s/it] 15%|█▌ | 78/520 [05:02<27:03, 3.67s/it] {'loss': 5.191, 'grad_norm': 0.00026872810342131186, 'learning_rate': 2.889370434915463, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:02<27:03, 3.67s/it] 15%|█▌ | 79/520 [05:06<26:59, 3.67s/it] {'loss': 5.4394, 'grad_norm': 0.0002188805522976963, 'learning_rate': 2.88581929876693, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:06<26:59, 3.67s/it] 15%|█▌ | 80/520 [05:09<26:55, 3.67s/it] {'loss': 6.9257, 'grad_norm': 0.0001869805122572848, 'learning_rate': 2.8822143178056114, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:09<26:55, 3.67s/it] 16%|█▌ | 81/520 [05:13<26:54, 3.68s/it] {'loss': 6.0573, 'grad_norm': 0.00022371901016092821, 'learning_rate': 2.878555632099931, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:13<26:54, 3.68s/it] 16%|█▌ | 82/520 [05:17<26:51, 3.68s/it] {'loss': 5.4543, 'grad_norm': 0.00021863028263493107, 'learning_rate': 2.874843383804964, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:17<26:51, 3.68s/it] 16%|█▌ | 83/520 [05:20<26:44, 3.67s/it] {'loss': 5.6533, 'grad_norm': 0.00020660040808356077, 'learning_rate': 2.871077717156915, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:20<26:44, 3.67s/it] 16%|█▌ | 84/520 [05:24<26:50, 3.69s/it] {'loss': 5.612, 'grad_norm': 0.00023192712528329284, 'learning_rate': 2.8672587784675097, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:24<26:50, 3.69s/it] 16%|█▋ | 85/520 [05:28<27:07, 3.74s/it] {'loss': 5.4861, 'grad_norm': 0.0002316675854962952, 'learning_rate': 2.8633867161183164, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:28<27:07, 3.74s/it] 17%|█▋ | 86/520 [05:32<27:24, 3.79s/it] {'loss': 6.0463, 'grad_norm': 0.00021240808737720262, 'learning_rate': 2.859461680554975, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:32<27:24, 3.79s/it] 17%|█▋ | 87/520 [05:36<27:32, 3.82s/it] {'loss': 6.6186, 'grad_norm': 0.0002589584036956361, 'learning_rate': 2.855483824281355, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:36<27:32, 3.82s/it] 17%|█▋ | 88/520 [05:40<27:36, 3.83s/it] {'loss': 7.1509, 'grad_norm': 0.0002406161995830177, 'learning_rate': 2.8514533018536286, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:40<27:36, 3.83s/it] 17%|█▋ | 89/520 [05:43<27:35, 3.84s/it] {'loss': 5.4411, 'grad_norm': 
0.00027341894498903905, 'learning_rate': 2.8473702698742662, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:43<27:35, 3.84s/it] 17%|█▋ | 90/520 [05:47<27:28, 3.83s/it] {'loss': 5.2477, 'grad_norm': 0.00027197870041133915, 'learning_rate': 2.843234886985951, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:47<27:28, 3.83s/it] 18%|█▊ | 91/520 [05:51<27:02, 3.78s/it] {'loss': 5.5773, 'grad_norm': 0.0003278518495147687, 'learning_rate': 2.839047313865417, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:51<27:02, 3.78s/it] 18%|█▊ | 92/520 [05:55<26:45, 3.75s/it] {'loss': 5.3126, 'grad_norm': 0.00032073508795762594, 'learning_rate': 2.834807713217203, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:55<26:45, 3.75s/it] 18%|█▊ | 93/520 [05:58<27:03, 3.80s/it] {'loss': 5.3832, 'grad_norm': 0.0003238192834947097, 'learning_rate': 2.8305162497673324, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:58<27:03, 3.80s/it] 18%|█▊ | 94/520 [06:02<26:42, 3.76s/it] {'loss': 5.8202, 'grad_norm': 0.00022658082692807264, 'learning_rate': 2.8261730902569147, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:02<26:42, 3.76s/it] 18%|█▊ | 95/520 [06:06<26:28, 3.74s/it] {'loss': 5.4301, 'grad_norm': 0.0006053167968189525, 'learning_rate': 2.8217784034356637, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:06<26:28, 3.74s/it] 18%|█▊ | 96/520 [06:09<26:14, 3.71s/it] {'loss': 5.1016, 'grad_norm': 0.00046588066168309244, 'learning_rate': 2.817332360055343, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:09<26:14, 3.71s/it] 19%|█▊ | 97/520 [06:13<26:06, 3.70s/it] {'loss': 5.4235, 'grad_norm': 0.0003571136441511053, 'learning_rate': 2.812835132863131, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:13<26:06, 3.70s/it] 19%|█▉ | 98/520 [06:17<26:01, 3.70s/it] {'loss': 5.046, 'grad_norm': 0.0002933755863144933, 'learning_rate': 2.8082868965949084, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:17<26:01, 3.70s/it] 19%|█▉ | 99/520 [06:21<25:54, 3.69s/it] {'loss': 5.5496, 'grad_norm': 0.0002570416133298451, 'learning_rate': 2.80368782796847, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:21<25:54, 3.69s/it] 19%|█▉ | 100/520 [06:24<26:04, 3.73s/it] {'loss': 6.3151, 'grad_norm': 0.0002049628760924552, 'learning_rate': 2.799038105676658, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:24<26:04, 3.73s/it] 19%|█▉ | 101/520 [06:28<25:58, 3.72s/it] {'loss': 5.3244, 'grad_norm': 0.00024075983335202652, 'learning_rate': 2.7943379103804196, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:28<25:58, 3.72s/it] 20%|█▉ | 102/520 [06:32<25:51, 3.71s/it] {'loss': 5.2795, 'grad_norm': 0.00025228741520643756, 'learning_rate': 2.7895874247017853, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:32<25:51, 3.71s/it] 20%|█▉ | 103/520 [06:36<26:07, 3.76s/it] {'loss': 4.8422, 'grad_norm': 0.00037793185959997103, 'learning_rate': 2.7847868332167773, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:36<26:07, 3.76s/it] 20%|██ | 104/520 [06:39<26:17, 3.79s/it] {'loss': 5.5611, 'grad_norm': 0.0002760443555476556, 'learning_rate': 2.7799363224482336, 'epoch': 0.2} + 20%|██ | 104/520 [06:39<26:17, 3.79s/it] 20%|██ | 105/520 [06:43<26:35, 3.84s/it] {'loss': 5.3304, 'grad_norm': 0.00029497884747557354, 'learning_rate': 2.7750360808585635, 'epoch': 0.2} + 20%|██ | 105/520 [06:43<26:35, 3.84s/it] 20%|██ | 106/520 [06:47<26:34, 3.85s/it] {'loss': 6.2773, 'grad_norm': 0.00023305240023118248, 'learning_rate': 2.7700862988424264, 'epoch': 0.2} + 20%|██ | 106/520 [06:47<26:34, 3.85s/it] 21%|██ | 107/520 [06:51<26:31, 3.85s/it] {'loss': 6.3399, 'grad_norm': 0.00022012500561308882, 'learning_rate': 2.7650871687193286, 'epoch': 0.21} + 21%|██ | 107/520 [06:51<26:31, 3.85s/it] 21%|██ | 108/520 [06:55<26:30, 3.86s/it] {'loss': 5.3255, 
'grad_norm': 0.00026293774734709955, 'learning_rate': 2.7600388847261574, 'epoch': 0.21} + 21%|██ | 108/520 [06:55<26:30, 3.86s/it] 21%|██ | 109/520 [06:59<26:28, 3.86s/it] {'loss': 6.3469, 'grad_norm': 0.00021848672135169522, 'learning_rate': 2.7549416430096296, 'epoch': 0.21} + 21%|██ | 109/520 [06:59<26:28, 3.86s/it] 21%|██ | 110/520 [07:03<26:22, 3.86s/it] {'loss': 5.8704, 'grad_norm': 0.00023585549051415193, 'learning_rate': 2.7497956416186735, 'epoch': 0.21} + 21%|██ | 110/520 [07:03<26:22, 3.86s/it] 21%|██▏ | 111/520 [07:07<26:18, 3.86s/it] {'loss': 5.8633, 'grad_norm': 0.0001539062941230191, 'learning_rate': 2.7446010804967313, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:07<26:18, 3.86s/it] 22%|██▏ | 112/520 [07:10<26:12, 3.85s/it] {'loss': 5.7505, 'grad_norm': 0.00022449837826022022, 'learning_rate': 2.7393581614739926, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:10<26:12, 3.85s/it] 22%|██▏ | 113/520 [07:14<26:14, 3.87s/it] {'loss': 5.0658, 'grad_norm': 0.0002501092829398313, 'learning_rate': 2.73406708825955, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:14<26:14, 3.87s/it] 22%|██▏ | 114/520 [07:18<26:07, 3.86s/it] {'loss': 5.8565, 'grad_norm': 0.00022116271534717693, 'learning_rate': 2.728728066433488, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:18<26:07, 3.86s/it] 22%|██▏ | 115/520 [07:22<26:15, 3.89s/it] {'loss': 6.1638, 'grad_norm': 0.0001906678002377514, 'learning_rate': 2.7233413034388905, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:22<26:15, 3.89s/it] 22%|██▏ | 116/520 [07:26<26:46, 3.98s/it] {'loss': 5.6732, 'grad_norm': 0.00023632841082829096, 'learning_rate': 2.717907008573785, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:26<26:46, 3.98s/it] 22%|██▎ | 117/520 [07:30<26:33, 3.95s/it] {'loss': 5.7659, 'grad_norm': 0.00023360458299437066, 'learning_rate': 2.712425392983008, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:30<26:33, 3.95s/it] 23%|██▎ | 118/520 [07:34<26:50, 4.01s/it] {'loss': 5.1256, 'grad_norm': 0.00022422488108435416, 'learning_rate': 2.7068966696500025, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:34<26:50, 4.01s/it] 23%|██▎ | 119/520 [07:39<27:07, 4.06s/it] {'loss': 4.9368, 'grad_norm': 0.0002548668805137285, 'learning_rate': 2.701321053388542, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:39<27:07, 4.06s/it] 23%|██▎ | 120/520 [07:43<27:17, 4.09s/it] {'loss': 5.1911, 'grad_norm': 0.00027815355505609744, 'learning_rate': 2.6956987608343836, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:43<27:17, 4.09s/it] 23%|██▎ | 121/520 [07:47<27:23, 4.12s/it] {'loss': 5.0938, 'grad_norm': 0.0002554272477884347, 'learning_rate': 2.690030010436853, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:47<27:23, 4.12s/it] 23%|██▎ | 122/520 [07:51<27:06, 4.09s/it] {'loss': 4.9725, 'grad_norm': 0.00029826214588749456, 'learning_rate': 2.6843150224503534, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:51<27:06, 4.09s/it] 24%|██▎ | 123/520 [07:55<26:40, 4.03s/it] {'loss': 6.4706, 'grad_norm': 0.00017945559499485435, 'learning_rate': 2.6785540189258104, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:55<26:40, 4.03s/it] 24%|██▍ | 124/520 [07:59<26:16, 3.98s/it] {'loss': 5.7261, 'grad_norm': 0.0004771250616825665, 'learning_rate': 2.6727472237020446, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:59<26:16, 3.98s/it] 24%|██▍ | 125/520 [08:03<26:07, 3.97s/it] {'loss': 5.3189, 'grad_norm': 0.0004424799196922004, 'learning_rate': 2.666894862397072, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:03<26:07, 3.97s/it] 24%|██▍ | 126/520 [08:07<27:10, 4.14s/it] {'loss': 5.6179, 'grad_norm': 0.0003909337234160405, 'learning_rate': 2.660997162399341, 'epoch': 0.24} + 24%|██▍ | 126/520 
[08:07<27:10, 4.14s/it] 24%|██▍ | 127/520 [08:11<26:33, 4.06s/it] {'loss': 5.5253, 'grad_norm': 0.0003338222149736179, 'learning_rate': 2.6550543528588944, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:11<26:33, 4.06s/it] 25%|██▍ | 128/520 [08:15<26:12, 4.01s/it] {'loss': 5.4197, 'grad_norm': 0.0003258541622605961, 'learning_rate': 2.649066664678467, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:15<26:12, 4.01s/it] 25%|██▍ | 129/520 [08:19<25:48, 3.96s/it] {'loss': 4.8433, 'grad_norm': 0.00027932083018295916, 'learning_rate': 2.6430343305045163, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:19<25:48, 3.96s/it] 25%|██▌ | 130/520 [08:23<25:35, 3.94s/it] {'loss': 5.6552, 'grad_norm': 0.00014573642178231392, 'learning_rate': 2.6369575847181794, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:23<25:35, 3.94s/it] 25%|██▌ | 131/520 [08:26<25:22, 3.91s/it] {'loss': 6.147, 'grad_norm': 0.0001634023328213336, 'learning_rate': 2.6308366634261695, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:26<25:22, 3.91s/it] 25%|██▌ | 132/520 [08:30<25:09, 3.89s/it] {'loss': 5.5357, 'grad_norm': 0.00016506877799784829, 'learning_rate': 2.6246718044516015, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:30<25:09, 3.89s/it] 26%|██▌ | 133/520 [08:34<24:47, 3.84s/it] {'loss': 5.3133, 'grad_norm': 0.00026254466153045853, 'learning_rate': 2.6184632473247484, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:34<24:47, 3.84s/it] 26%|██▌ | 134/520 [08:38<24:25, 3.80s/it] {'loss': 5.3641, 'grad_norm': 0.00015783525085956052, 'learning_rate': 2.61221123327374, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:38<24:25, 3.80s/it] 26%|██▌ | 135/520 [08:41<24:14, 3.78s/it] {'loss': 5.7441, 'grad_norm': 0.00014627825705266893, 'learning_rate': 2.605916005215186, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:41<24:14, 3.78s/it] 26%|██▌ | 136/520 [08:45<24:31, 3.83s/it] {'loss': 5.1939, 'grad_norm': 0.00017914602325003358, 'learning_rate': 2.5995778077447396, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:45<24:31, 3.83s/it] 26%|██▋ | 137/520 [08:49<24:17, 3.80s/it] {'loss': 5.4025, 'grad_norm': 0.0002004017383957171, 'learning_rate': 2.5931968871275926, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:49<24:17, 3.80s/it] 27%|██▋ | 138/520 [08:53<24:19, 3.82s/it] {'loss': 5.1605, 'grad_norm': 0.0001863523828484656, 'learning_rate': 2.586773491288909, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:53<24:19, 3.82s/it] 27%|██▋ | 139/520 [08:57<24:20, 3.83s/it] {'loss': 5.7164, 'grad_norm': 0.00016085193064580713, 'learning_rate': 2.58030786980419, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:57<24:20, 3.83s/it] 27%|██▋ | 140/520 [09:01<24:20, 3.84s/it] {'loss': 6.2761, 'grad_norm': 0.00014449080317369625, 'learning_rate': 2.5738002738895776, 'epoch': 0.27} + 27%|██▋ | 140/520 [09:01<24:20, 3.84s/it] 27%|██▋ | 141/520 [09:05<24:20, 3.85s/it] {'loss': 5.5044, 'grad_norm': 0.0002873687942001402, 'learning_rate': 2.5672509563920953, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:05<24:20, 3.85s/it] 27%|██▋ | 142/520 [09:09<24:18, 3.86s/it] {'loss': 6.2496, 'grad_norm': 0.00021362910812410213, 'learning_rate': 2.560660171779821, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:09<24:18, 3.86s/it] 28%|██▊ | 143/520 [09:12<24:17, 3.87s/it] {'loss': 5.4931, 'grad_norm': 0.0001982860293728391, 'learning_rate': 2.554028176132004, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:12<24:17, 3.87s/it] 28%|██▊ | 144/520 [09:16<24:11, 3.86s/it] {'loss': 4.813, 'grad_norm': 0.00021034402031171496, 'learning_rate': 2.547355227129109, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:16<24:11, 3.86s/it] 28%|██▊ | 145/520 [09:20<24:07, 3.86s/it] {'loss': 5.0289, 'grad_norm': 
0.0002241599779684819, 'learning_rate': 2.5406415840428123, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:20<24:07, 3.86s/it] 28%|██▊ | 146/520 [09:24<24:10, 3.88s/it] {'loss': 6.3049, 'grad_norm': 0.0001417668454520477, 'learning_rate': 2.5338875077259204, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:24<24:10, 3.88s/it] 28%|██▊ | 147/520 [09:28<24:04, 3.87s/it] {'loss': 4.9017, 'grad_norm': 0.00018408761326414952, 'learning_rate': 2.52709326060224, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:28<24:04, 3.87s/it] 28%|██▊ | 148/520 [09:32<24:06, 3.89s/it] {'loss': 5.179, 'grad_norm': 0.00018683825399842775, 'learning_rate': 2.520259106656379, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:32<24:06, 3.89s/it] 29%|██▊ | 149/520 [09:36<24:00, 3.88s/it] {'loss': 5.1439, 'grad_norm': 0.0001809433399103933, 'learning_rate': 2.5133853114234905, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:36<24:00, 3.88s/it] 29%|██▉ | 150/520 [09:40<23:50, 3.87s/it] {'loss': 5.4979, 'grad_norm': 0.00021160675573843692, 'learning_rate': 2.5064721419789553, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:40<23:50, 3.87s/it] 29%|██▉ | 151/520 [09:43<23:53, 3.88s/it] {'loss': 5.0012, 'grad_norm': 0.00016146020506233649, 'learning_rate': 2.499519866928006, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:43<23:53, 3.88s/it] 29%|██▉ | 152/520 [09:47<23:45, 3.87s/it] {'loss': 5.1214, 'grad_norm': 0.00017534355288599556, 'learning_rate': 2.492528756395289, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:47<23:45, 3.87s/it] 29%|██▉ | 153/520 [09:51<23:36, 3.86s/it] {'loss': 5.046, 'grad_norm': 0.00017570043451744098, 'learning_rate': 2.4854990820143708, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:51<23:36, 3.86s/it] 30%|██▉ | 154/520 [09:55<23:36, 3.87s/it] {'loss': 5.4277, 'grad_norm': 0.00018910187531767948, 'learning_rate': 2.4784311169171818, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:55<23:36, 3.87s/it] 30%|██▉ | 155/520 [09:59<23:30, 3.86s/it] {'loss': 5.3285, 'grad_norm': 0.00019358447229888973, 'learning_rate': 2.4713251357234056, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:59<23:30, 3.86s/it] 30%|███ | 156/520 [10:03<23:24, 3.86s/it] {'loss': 5.5612, 'grad_norm': 0.00021878550954838814, 'learning_rate': 2.4641814145298087, 'epoch': 0.3} + 30%|███ | 156/520 [10:03<23:24, 3.86s/it] 30%|███ | 157/520 [10:07<23:16, 3.85s/it] {'loss': 6.3712, 'grad_norm': 0.0001428767219157746, 'learning_rate': 2.457000230899513, 'epoch': 0.3} + 30%|███ | 157/520 [10:07<23:16, 3.85s/it] 30%|███ | 158/520 [10:10<23:12, 3.85s/it] {'loss': 5.1857, 'grad_norm': 0.00021272153991061955, 'learning_rate': 2.44978186385121, 'epoch': 0.3} + 30%|███ | 158/520 [10:10<23:12, 3.85s/it] 31%|███ | 159/520 [10:14<22:53, 3.81s/it] {'loss': 5.0392, 'grad_norm': 0.00022109210851851507, 'learning_rate': 2.4425265938483207, 'epoch': 0.31} + 31%|███ | 159/520 [10:14<22:53, 3.81s/it] 31%|███ | 160/520 [10:18<22:36, 3.77s/it] {'loss': 5.0789, 'grad_norm': 0.000148158766875665, 'learning_rate': 2.4352347027881005, 'epoch': 0.31} + 31%|███ | 160/520 [10:18<22:36, 3.77s/it] 31%|███ | 161/520 [10:21<22:19, 3.73s/it] {'loss': 5.2466, 'grad_norm': 0.00016875754299286504, 'learning_rate': 2.4279064739906824, 'epoch': 0.31} + 31%|███ | 161/520 [10:21<22:19, 3.73s/it] 31%|███ | 162/520 [10:25<22:09, 3.71s/it] {'loss': 6.2431, 'grad_norm': 0.00011602614008915526, 'learning_rate': 2.420542192188071, 'epoch': 0.31} + 31%|███ | 162/520 [10:25<22:09, 3.71s/it] 31%|███▏ | 163/520 [10:29<21:57, 3.69s/it] {'loss': 4.9167, 'grad_norm': 0.0001510910052136256, 'learning_rate': 2.413142143513081, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:29<21:57, 3.69s/it] 
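Each step in this log is recorded as a tqdm progress redraw followed by a flat metrics dict with loss, grad_norm, learning_rate, and epoch. A minimal sketch, assuming only the record format visible here, that recovers those per-step metrics from such a log; the file name is a placeholder.

import ast
import re

# Each per-step record ends with a flat dict literal such as
# {'loss': 5.4172, 'grad_norm': 0.00046, 'learning_rate': 2.96, 'epoch': 0.1}
RECORD = re.compile(r"\{[^{}]*'loss'[^{}]*\}")

def load_metrics(path):
    # Return the per-step metric dicts, in order, from a log in this format.
    with open(path, encoding="utf-8") as fh:
        return [ast.literal_eval(m.group(0)) for m in RECORD.finditer(fh.read())]

steps = load_metrics("train.log")  # hypothetical file name
print(len(steps), [s["loss"] for s in steps[:3]])
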
32%|███▏ | 164/520 [10:32<21:49, 3.68s/it] {'loss': 4.7471, 'grad_norm': 0.0001816097047757714, 'learning_rate': 2.4057066154882163, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:32<21:49, 3.68s/it] 32%|███▏ | 165/520 [10:36<21:41, 3.67s/it] {'loss': 5.09, 'grad_norm': 0.0001240412157661367, 'learning_rate': 2.3982358970145006, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:36<21:41, 3.67s/it] 32%|███▏ | 166/520 [10:40<21:34, 3.66s/it] {'loss': 5.0098, 'grad_norm': 0.00012849503928933807, 'learning_rate': 2.390730278360252, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:40<21:34, 3.66s/it] 32%|███▏ | 167/520 [10:43<21:32, 3.66s/it] {'loss': 5.3456, 'grad_norm': 0.0001353637376371405, 'learning_rate': 2.383190051149807, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:43<21:32, 3.66s/it] 32%|███▏ | 168/520 [10:47<21:29, 3.66s/it] {'loss': 5.0245, 'grad_norm': 0.00016277423779763507, 'learning_rate': 2.375615508352185, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:47<21:29, 3.66s/it] 32%|███▎ | 169/520 [10:51<21:23, 3.66s/it] {'loss': 5.2023, 'grad_norm': 0.00014715795043921393, 'learning_rate': 2.368006944269709, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:51<21:23, 3.66s/it] 33%|███▎ | 170/520 [10:54<21:33, 3.69s/it] {'loss': 5.8159, 'grad_norm': 0.00016449066904085713, 'learning_rate': 2.360364654526569, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:54<21:33, 3.69s/it] 33%|███▎ | 171/520 [10:58<21:29, 3.70s/it] {'loss': 4.7764, 'grad_norm': 0.000283896098507083, 'learning_rate': 2.352688936057339, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:58<21:29, 3.70s/it] 33%|███▎ | 172/520 [11:02<21:22, 3.69s/it] {'loss': 5.2103, 'grad_norm': 0.00020758879601212886, 'learning_rate': 2.3449800870954327, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:02<21:22, 3.69s/it] 33%|███▎ | 173/520 [11:05<21:18, 3.68s/it] {'loss': 4.8773, 'grad_norm': 0.0001502036801090635, 'learning_rate': 2.337238407161526, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:05<21:18, 3.68s/it] 33%|███▎ | 174/520 [11:09<21:15, 3.69s/it] {'loss': 5.3609, 'grad_norm': 0.00013438020156015322, 'learning_rate': 2.3294641970519088, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:09<21:15, 3.69s/it] 34%|███▎ | 175/520 [11:13<21:14, 3.69s/it] {'loss': 4.9109, 'grad_norm': 0.00013134874940992668, 'learning_rate': 2.3216577588268072, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:13<21:14, 3.69s/it] 34%|███▍ | 176/520 [11:16<21:05, 3.68s/it] {'loss': 6.3701, 'grad_norm': 9.616413816230672e-05, 'learning_rate': 2.3138193957986393, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:16<21:05, 3.68s/it] 34%|███▍ | 177/520 [11:20<20:56, 3.66s/it] {'loss': 5.7883, 'grad_norm': 0.00017123168342849014, 'learning_rate': 2.3059494125202358, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:20<20:56, 3.66s/it] 34%|███▍ | 178/520 [11:24<20:52, 3.66s/it] {'loss': 5.2343, 'grad_norm': 0.00023846275524677576, 'learning_rate': 2.298048114773005, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:24<20:52, 3.66s/it] 34%|███▍ | 179/520 [11:27<20:50, 3.67s/it] {'loss': 5.1881, 'grad_norm': 0.00013039047730854043, 'learning_rate': 2.290115809555051, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:27<20:50, 3.67s/it] 35%|███▍ | 180/520 [11:31<20:46, 3.67s/it] {'loss': 5.3279, 'grad_norm': 0.00012792116765151243, 'learning_rate': 2.282152805069247, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:31<20:46, 3.67s/it] 35%|███▍ | 181/520 [11:35<21:01, 3.72s/it] {'loss': 4.9951, 'grad_norm': 0.0001298897101884633, 'learning_rate': 2.2741594107112597, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:35<21:01, 3.72s/it] 35%|███▌ | 182/520 [11:39<21:18, 3.78s/it] {'loss': 5.2429, 'grad_norm': 
0.00013794764240256392, 'learning_rate': 2.2661359370575287, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:39<21:18, 3.78s/it] 35%|███▌ | 183/520 [11:43<21:27, 3.82s/it] {'loss': 5.0304, 'grad_norm': 0.0001360967181655284, 'learning_rate': 2.2580826958531963, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:43<21:27, 3.82s/it] 35%|███▌ | 184/520 [11:47<21:36, 3.86s/it] {'loss': 4.7678, 'grad_norm': 0.00015015590600853817, 'learning_rate': 2.25, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:47<21:36, 3.86s/it] 36%|███▌ | 185/520 [11:51<21:39, 3.88s/it] {'loss': 5.7581, 'grad_norm': 0.00016395031523663305, 'learning_rate': 2.241888163544111, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:51<21:39, 3.88s/it] 36%|███▌ | 186/520 [11:55<21:41, 3.90s/it] {'loss': 5.0191, 'grad_norm': 0.00016466352691771972, 'learning_rate': 2.233747501663934, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:55<21:41, 3.90s/it] 36%|███▌ | 187/520 [11:59<21:43, 3.91s/it] {'loss': 5.3486, 'grad_norm': 0.0001937511307207995, 'learning_rate': 2.22557833065786, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:59<21:43, 3.91s/it] 36%|███▌ | 188/520 [12:03<21:50, 3.95s/it] {'loss': 4.9541, 'grad_norm': 0.00016955930638194426, 'learning_rate': 2.2173809679319776, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:03<21:50, 3.95s/it] 36%|███▋ | 189/520 [12:07<21:43, 3.94s/it] {'loss': 5.4117, 'grad_norm': 0.00014249847403438202, 'learning_rate': 2.2091557319877406, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:07<21:43, 3.94s/it] 37%|███▋ | 190/520 [12:10<21:33, 3.92s/it] {'loss': 4.9847, 'grad_norm': 0.0001923519025472483, 'learning_rate': 2.200902942409593, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:10<21:33, 3.92s/it] 37%|███▋ | 191/520 [12:14<21:30, 3.92s/it] {'loss': 5.0966, 'grad_norm': 0.00023759046454705933, 'learning_rate': 2.192622919852551, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:14<21:30, 3.92s/it] 37%|███▋ | 192/520 [12:18<21:09, 3.87s/it] {'loss': 5.4598, 'grad_norm': 0.0002782012887215614, 'learning_rate': 2.1843159860297447, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:18<21:09, 3.87s/it] 37%|███▋ | 193/520 [12:22<20:44, 3.81s/it] {'loss': 6.0667, 'grad_norm': 0.00018704217969685668, 'learning_rate': 2.175982463699918, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:22<20:44, 3.81s/it] 37%|███▋ | 194/520 [12:25<20:31, 3.78s/it] {'loss': 5.405, 'grad_norm': 0.0002878014899336089, 'learning_rate': 2.1676226766548883, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:25<20:31, 3.78s/it] 38%|███▊ | 195/520 [12:29<20:19, 3.75s/it] {'loss': 5.0736, 'grad_norm': 0.0002488761438748, 'learning_rate': 2.1592369497069672, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:29<20:19, 3.75s/it] 38%|███▊ | 196/520 [12:33<20:07, 3.73s/it] {'loss': 5.2174, 'grad_norm': 0.0002604045128489337, 'learning_rate': 2.150825608676337, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:33<20:07, 3.73s/it] 38%|███▊ | 197/520 [12:36<19:59, 3.71s/it] {'loss': 4.8995, 'grad_norm': 0.00021738913520378842, 'learning_rate': 2.142388980378394, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:36<19:59, 3.71s/it] 38%|███▊ | 198/520 [12:40<19:48, 3.69s/it] {'loss': 5.4642, 'grad_norm': 0.00021677550571597327, 'learning_rate': 2.1339273926110494, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:40<19:48, 3.69s/it] 38%|███▊ | 199/520 [12:44<19:48, 3.70s/it] {'loss': 5.035, 'grad_norm': 0.00025772740298156334, 'learning_rate': 2.1254411741419923, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:44<19:48, 3.70s/it] 38%|███▊ | 200/520 [12:48<19:45, 3.70s/it] {'loss': 5.7698, 'grad_norm': 0.00014532649537902178, 'learning_rate': 2.116930654695918, 'epoch': 0.38} + 38%|███▊ | 200/520 
[12:48<19:45, 3.70s/it] 39%|███▊ | 201/520 [12:51<19:40, 3.70s/it] {'loss': 5.5643, 'grad_norm': 0.00015289759907885752, 'learning_rate': 2.1083961649417127, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:51<19:40, 3.70s/it] 39%|███▉ | 202/520 [12:55<19:34, 3.69s/it] {'loss': 5.2089, 'grad_norm': 0.00020528214494733296, 'learning_rate': 2.0998380364796114, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:55<19:34, 3.69s/it] 39%|███▉ | 203/520 [12:59<19:30, 3.69s/it] {'loss': 5.0574, 'grad_norm': 0.0002099613302841356, 'learning_rate': 2.0912566018283094, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:59<19:30, 3.69s/it] 39%|███▉ | 204/520 [13:02<19:23, 3.68s/it] {'loss': 5.5204, 'grad_norm': 0.00022092885216296287, 'learning_rate': 2.0826521944120424, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:02<19:23, 3.68s/it] 39%|███▉ | 205/520 [13:06<19:22, 3.69s/it] {'loss': 6.0394, 'grad_norm': 0.00024635817254981804, 'learning_rate': 2.074025148547635, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:06<19:22, 3.69s/it] 40%|███▉ | 206/520 [13:10<19:17, 3.69s/it] {'loss': 5.4912, 'grad_norm': 0.00020487243149192174, 'learning_rate': 2.065375799431508, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:10<19:17, 3.69s/it] 40%|███▉ | 207/520 [13:13<19:13, 3.69s/it] {'loss': 5.9823, 'grad_norm': 0.00016346259022758416, 'learning_rate': 2.0567044831266568, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:13<19:13, 3.69s/it] 40%|████ | 208/520 [13:17<19:12, 3.69s/it] {'loss': 5.0249, 'grad_norm': 0.00015749744301356215, 'learning_rate': 2.048011536549593, 'epoch': 0.4} + 40%|████ | 208/520 [13:17<19:12, 3.69s/it] 40%|████ | 209/520 [13:21<19:02, 3.67s/it] {'loss': 5.239, 'grad_norm': 0.00014717506639911238, 'learning_rate': 2.039297297457251, 'epoch': 0.4} + 40%|████ | 209/520 [13:21<19:02, 3.67s/it] 40%|████ | 210/520 [13:24<19:05, 3.69s/it] {'loss': 5.1569, 'grad_norm': 0.00019313601369936172, 'learning_rate': 2.030562104433872, 'epoch': 0.4} + 40%|████ | 210/520 [13:24<19:05, 3.69s/it] 41%|████ | 211/520 [13:28<19:01, 3.69s/it] {'loss': 5.1733, 'grad_norm': 0.0001407681573087386, 'learning_rate': 2.0218062968778407, 'epoch': 0.41} + 41%|████ | 211/520 [13:28<19:01, 3.69s/it] 41%|████ | 212/520 [13:32<18:59, 3.70s/it] {'loss': 4.8164, 'grad_norm': 0.00012080217681593266, 'learning_rate': 2.013030214988503, 'epoch': 0.41} + 41%|████ | 212/520 [13:32<18:59, 3.70s/it] 41%|████ | 213/520 [13:35<18:51, 3.69s/it] {'loss': 5.4952, 'grad_norm': 0.00010912550406794636, 'learning_rate': 2.0042341997529465, 'epoch': 0.41} + 41%|████ | 213/520 [13:35<18:51, 3.69s/it] 41%|████ | 214/520 [13:39<18:49, 3.69s/it] {'loss': 5.2654, 'grad_norm': 0.00010812457225333575, 'learning_rate': 1.9954185929327508, 'epoch': 0.41} + 41%|████ | 214/520 [13:39<18:49, 3.69s/it] 41%|████▏ | 215/520 [13:43<18:43, 3.68s/it] {'loss': 5.6438, 'grad_norm': 9.969137628910508e-05, 'learning_rate': 1.9865837370507107, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:43<18:43, 3.68s/it] 42%|████▏ | 216/520 [13:47<18:37, 3.68s/it] {'loss': 4.9448, 'grad_norm': 0.00011003409438603829, 'learning_rate': 1.9777299753775268, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:47<18:37, 3.68s/it] 42%|████▏ | 217/520 [13:50<18:32, 3.67s/it] {'loss': 5.1306, 'grad_norm': 0.0001031685938679705, 'learning_rate': 1.9688576519184668, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:50<18:32, 3.67s/it] 42%|████▏ | 218/520 [13:54<18:31, 3.68s/it] {'loss': 5.4531, 'grad_norm': 0.00010695310971750536, 'learning_rate': 1.9599671114000015, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:54<18:31, 3.68s/it] 42%|████▏ | 219/520 [13:58<18:26, 3.68s/it] 
{'loss': 4.6723, 'grad_norm': 0.00010575686048564569, 'learning_rate': 1.9510586992564094, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:58<18:26, 3.68s/it] 42%|████▏ | 220/520 [14:01<18:26, 3.69s/it] {'loss': 5.8862, 'grad_norm': 0.00012383112190854812, 'learning_rate': 1.9421327616163564, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:01<18:26, 3.69s/it] 42%|████▎ | 221/520 [14:05<18:23, 3.69s/it] {'loss': 5.0128, 'grad_norm': 0.00011924628823416308, 'learning_rate': 1.933189645289445, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:05<18:23, 3.69s/it] 43%|████▎ | 222/520 [14:09<18:21, 3.70s/it] {'loss': 4.7088, 'grad_norm': 0.00010668925453914258, 'learning_rate': 1.9242296977527413, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:09<18:21, 3.70s/it] 43%|████▎ | 223/520 [14:12<18:14, 3.69s/it] {'loss': 4.7656, 'grad_norm': 0.00011011945262366054, 'learning_rate': 1.915253267137274, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:12<18:14, 3.69s/it] 43%|████▎ | 224/520 [14:16<18:26, 3.74s/it] {'loss': 7.3425, 'grad_norm': 0.00012980662980965207, 'learning_rate': 1.906260702214508, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:16<18:26, 3.74s/it] 43%|████▎ | 225/520 [14:20<18:34, 3.78s/it] {'loss': 4.9909, 'grad_norm': 0.0002211885439841858, 'learning_rate': 1.8972523523827909, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:20<18:34, 3.78s/it] 43%|████▎ | 226/520 [14:24<18:27, 3.77s/it] {'loss': 5.3258, 'grad_norm': 0.00017558744690328258, 'learning_rate': 1.888228567653781, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:24<18:27, 3.77s/it] 44%|████▎ | 227/520 [14:28<18:36, 3.81s/it] {'loss': 5.0463, 'grad_norm': 0.00021735888747021468, 'learning_rate': 1.879189698638846, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:28<18:36, 3.81s/it] 44%|████▍ | 228/520 [14:32<18:49, 3.87s/it] {'loss': 6.487, 'grad_norm': 0.0001360786653843839, 'learning_rate': 1.87013609653544, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:32<18:49, 3.87s/it] 44%|████▍ | 229/520 [14:35<18:33, 3.83s/it] {'loss': 5.0743, 'grad_norm': 0.00011815842112855736, 'learning_rate': 1.8610681131134597, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:35<18:33, 3.83s/it] 44%|████▍ | 230/520 [14:39<18:22, 3.80s/it] {'loss': 5.0001, 'grad_norm': 0.00018956546279992769, 'learning_rate': 1.851986100701573, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:39<18:22, 3.80s/it] 44%|████▍ | 231/520 [14:43<18:12, 3.78s/it] {'loss': 5.1613, 'grad_norm': 0.00010197923077932681, 'learning_rate': 1.8428904121735346, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:43<18:12, 3.78s/it] 45%|████▍ | 232/520 [14:47<18:07, 3.78s/it] {'loss': 6.6718, 'grad_norm': 8.24678481594792e-05, 'learning_rate': 1.8337814009344715, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:47<18:07, 3.78s/it] 45%|████▍ | 233/520 [14:50<17:52, 3.74s/it] {'loss': 5.987, 'grad_norm': 9.879717637876669e-05, 'learning_rate': 1.8246594209071543, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:50<17:52, 3.74s/it] 45%|████▌ | 234/520 [14:54<17:45, 3.73s/it] {'loss': 4.7099, 'grad_norm': 0.00012497893945007555, 'learning_rate': 1.8155248265182438, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:54<17:45, 3.73s/it] 45%|████▌ | 235/520 [14:58<17:37, 3.71s/it] {'loss': 5.0612, 'grad_norm': 8.78848636543832e-05, 'learning_rate': 1.8063779726845206, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:58<17:37, 3.71s/it] 45%|████▌ | 236/520 [15:01<17:34, 3.71s/it] {'loss': 5.7278, 'grad_norm': 0.00010446393666101492, 'learning_rate': 1.7972192147990964, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:01<17:34, 3.71s/it] 46%|████▌ | 237/520 [15:05<17:25, 3.70s/it] {'loss': 5.2099, 'grad_norm': 
0.00010912158790297518, 'learning_rate': 1.7880489087176046, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:05<17:25, 3.70s/it] 46%|████▌ | 238/520 [15:09<17:23, 3.70s/it] {'loss': 4.871, 'grad_norm': 9.050568650155337e-05, 'learning_rate': 1.7788674107443723, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:09<17:23, 3.70s/it] 46%|████▌ | 239/520 [15:12<17:16, 3.69s/it] {'loss': 5.6179, 'grad_norm': 8.97381610513362e-05, 'learning_rate': 1.769675077618579, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:12<17:16, 3.69s/it] 46%|████▌ | 240/520 [15:16<17:09, 3.68s/it] {'loss': 4.4769, 'grad_norm': 0.0001304510073530752, 'learning_rate': 1.7604722665003958, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:16<17:09, 3.68s/it] 46%|████▋ | 241/520 [15:20<17:03, 3.67s/it] {'loss': 4.8169, 'grad_norm': 0.0001114685385544892, 'learning_rate': 1.7512593349571046, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:20<17:03, 3.67s/it] 47%|████▋ | 242/520 [15:23<17:04, 3.69s/it] {'loss': 5.0434, 'grad_norm': 0.00010795593254039529, 'learning_rate': 1.74203664094921, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:23<17:04, 3.69s/it] 47%|████▋ | 243/520 [15:27<17:00, 3.68s/it] {'loss': 4.7697, 'grad_norm': 0.00010897644558331717, 'learning_rate': 1.7328045428165273, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:27<17:00, 3.68s/it] 47%|████▋ | 244/520 [15:31<16:58, 3.69s/it] {'loss': 5.4017, 'grad_norm': 0.00025637233183707636, 'learning_rate': 1.7235633992642616, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:31<16:58, 3.69s/it] 47%|████▋ | 245/520 [15:35<16:54, 3.69s/it] {'loss': 4.7657, 'grad_norm': 0.00024317275214788125, 'learning_rate': 1.71431356934907, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:35<16:54, 3.69s/it] 47%|████▋ | 246/520 [15:38<16:54, 3.70s/it] {'loss': 6.363, 'grad_norm': 6.717764427523628e-05, 'learning_rate': 1.7050554124651103, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:38<16:54, 3.70s/it] 48%|████▊ | 247/520 [15:42<16:44, 3.68s/it] {'loss': 5.6743, 'grad_norm': 0.0002596650159958795, 'learning_rate': 1.6957892883300776, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:42<16:44, 3.68s/it] 48%|████▊ | 248/520 [15:46<16:37, 3.67s/it] {'loss': 4.8343, 'grad_norm': 0.0001785352723442783, 'learning_rate': 1.686515556971228, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:46<16:37, 3.67s/it] 48%|████▊ | 249/520 [15:49<16:34, 3.67s/it] {'loss': 5.3759, 'grad_norm': 0.0002597124035159682, 'learning_rate': 1.6772345787113894, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:49<16:34, 3.67s/it] 48%|████▊ | 250/520 [15:53<16:30, 3.67s/it] {'loss': 5.1968, 'grad_norm': 0.00011738311715358291, 'learning_rate': 1.6679467141549618, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:53<16:30, 3.67s/it] 48%|████▊ | 251/520 [15:57<16:29, 3.68s/it] {'loss': 5.471, 'grad_norm': 0.0005272914369851846, 'learning_rate': 1.6586523241739068, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:57<16:29, 3.68s/it] 48%|████▊ | 252/520 [16:00<16:27, 3.68s/it] {'loss': 5.9521, 'grad_norm': 0.000325443702228404, 'learning_rate': 1.649351769893725, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:00<16:27, 3.68s/it] 49%|████▊ | 253/520 [16:04<16:25, 3.69s/it] {'loss': 5.4758, 'grad_norm': 0.00011637325853104537, 'learning_rate': 1.640045412679426, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:04<16:25, 3.69s/it] 49%|████▉ | 254/520 [16:08<16:19, 3.68s/it] {'loss': 4.9611, 'grad_norm': 0.00013394984121649538, 'learning_rate': 1.6307336141214877, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:08<16:19, 3.68s/it] 49%|████▉ | 255/520 [16:11<16:14, 3.68s/it] {'loss': 5.171, 'grad_norm': 0.00011458101171361773, 'learning_rate': 
1.621416736021805, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:11<16:14, 3.68s/it] 49%|████▉ | 256/520 [16:15<16:09, 3.67s/it] {'loss': 5.1739, 'grad_norm': 0.0001693069046083619, 'learning_rate': 1.6120951403796364, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:15<16:09, 3.67s/it] 49%|████▉ | 257/520 [16:19<16:05, 3.67s/it] {'loss': 5.1669, 'grad_norm': 7.150489066975044e-05, 'learning_rate': 1.6027691893775349, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:19<16:05, 3.67s/it] 50%|████▉ | 258/520 [16:22<16:00, 3.66s/it] {'loss': 5.1868, 'grad_norm': 0.00015217572895793888, 'learning_rate': 1.5934392453672783, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:22<16:00, 3.66s/it] 50%|████▉ | 259/520 [16:26<15:54, 3.66s/it] {'loss': 5.5343, 'grad_norm': 9.754877989055831e-05, 'learning_rate': 1.5841056708557877, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:26<15:54, 3.66s/it] 50%|█████ | 260/520 [16:30<15:52, 3.66s/it] {'loss': 6.0883, 'grad_norm': 0.00010114850238936993, 'learning_rate': 1.5747688284910457, 'epoch': 0.5} + 50%|█████ | 260/520 [16:30<15:52, 3.66s/it] 50%|█████ | 261/520 [16:33<15:48, 3.66s/it] {'loss': 6.1248, 'grad_norm': 7.853220050502762e-05, 'learning_rate': 1.5654290810480043, 'epoch': 0.5} + 50%|█████ | 261/520 [16:33<15:48, 3.66s/it] 50%|█████ | 262/520 [16:37<15:43, 3.66s/it] {'loss': 5.0287, 'grad_norm': 9.361291051709207e-05, 'learning_rate': 1.5560867914144887, 'epoch': 0.5} + 50%|█████ | 262/520 [16:37<15:43, 3.66s/it] 51%|█████ | 263/520 [16:41<15:38, 3.65s/it] {'loss': 6.212, 'grad_norm': 8.86168975828664e-05, 'learning_rate': 1.5467423225770998, 'epoch': 0.51} + 51%|█████ | 263/520 [16:41<15:38, 3.65s/it] 51%|█████ | 264/520 [16:44<15:37, 3.66s/it] {'loss': 5.3405, 'grad_norm': 0.00010237791625747394, 'learning_rate': 1.5373960376071094, 'epoch': 0.51} + 51%|█████ | 264/520 [16:44<15:37, 3.66s/it] 51%|█████ | 265/520 [16:48<15:34, 3.67s/it] {'loss': 5.1744, 'grad_norm': 9.100341100396312e-05, 'learning_rate': 1.5280482996463534, 'epoch': 0.51} + 51%|█████ | 265/520 [16:48<15:34, 3.67s/it] 51%|█████ | 266/520 [16:52<15:28, 3.66s/it] {'loss': 4.2874, 'grad_norm': 0.00011214542536601881, 'learning_rate': 1.5186994718931226, 'epoch': 0.51} + 51%|█████ | 266/520 [16:52<15:28, 3.66s/it] 51%|█████▏ | 267/520 [16:55<15:28, 3.67s/it] {'loss': 4.9741, 'grad_norm': 0.00011988378739406415, 'learning_rate': 1.5093499175880503, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:55<15:28, 3.67s/it] 52%|█████▏ | 268/520 [16:59<15:24, 3.67s/it] {'loss': 6.5597, 'grad_norm': 0.00011476224776277301, 'learning_rate': 1.5, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:59<15:24, 3.67s/it] 52%|█████▏ | 269/520 [17:03<15:19, 3.66s/it] {'loss': 5.3603, 'grad_norm': 0.0001544816668343211, 'learning_rate': 1.4906500824119497, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:03<15:19, 3.66s/it] 52%|█████▏ | 270/520 [17:06<15:24, 3.70s/it] {'loss': 5.6166, 'grad_norm': 0.0002419420395664031, 'learning_rate': 1.4813005281068774, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:06<15:24, 3.70s/it] 52%|█████▏ | 271/520 [17:10<15:24, 3.71s/it] {'loss': 5.7489, 'grad_norm': 0.00015010767120188478, 'learning_rate': 1.4719517003536469, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:10<15:24, 3.71s/it] 52%|█████▏ | 272/520 [17:14<15:36, 3.78s/it] {'loss': 6.2147, 'grad_norm': 0.0003030653723866536, 'learning_rate': 1.4626039623928908, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:14<15:36, 3.78s/it] 52%|█████▎ | 273/520 [17:18<15:41, 3.81s/it] {'loss': 6.295, 'grad_norm': 0.00018478366486425905, 'learning_rate': 1.4532576774229007, 'epoch': 0.53} + 52%|█████▎ 
| 273/520 [17:18<15:41, 3.81s/it] 53%|█████▎ | 274/520 [17:22<15:46, 3.85s/it] {'loss': 4.9757, 'grad_norm': 0.00015080363473635985, 'learning_rate': 1.4439132085855118, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:22<15:46, 3.85s/it] 53%|█████▎ | 275/520 [17:26<15:51, 3.88s/it] {'loss': 5.0396, 'grad_norm': 0.00018331943240457372, 'learning_rate': 1.4345709189519962, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:26<15:51, 3.88s/it] 53%|█████▎ | 276/520 [17:30<15:47, 3.88s/it] {'loss': 5.5958, 'grad_norm': 7.959393537170022e-05, 'learning_rate': 1.425231171508954, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:30<15:47, 3.88s/it] 53%|█████▎ | 277/520 [17:34<15:39, 3.87s/it] {'loss': 6.2509, 'grad_norm': 8.500040663367593e-05, 'learning_rate': 1.415894329144212, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:34<15:39, 3.87s/it] 53%|█████▎ | 278/520 [17:37<15:21, 3.81s/it] {'loss': 4.4193, 'grad_norm': 7.887968524189337e-05, 'learning_rate': 1.406560754632722, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:37<15:21, 3.81s/it] 54%|█████▎ | 279/520 [17:41<15:04, 3.75s/it] {'loss': 5.739, 'grad_norm': 9.294997049472447e-05, 'learning_rate': 1.3972308106224651, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:41<15:04, 3.75s/it] 54%|█████▍ | 280/520 [17:45<14:55, 3.73s/it] {'loss': 4.8983, 'grad_norm': 8.584180757487844e-05, 'learning_rate': 1.3879048596203636, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:45<14:55, 3.73s/it] 54%|█████▍ | 281/520 [17:48<14:47, 3.71s/it] {'loss': 5.3591, 'grad_norm': 6.826848216764272e-05, 'learning_rate': 1.378583263978195, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:48<14:47, 3.71s/it] 54%|█████▍ | 282/520 [17:52<14:48, 3.73s/it] {'loss': 4.539, 'grad_norm': 7.97722508596785e-05, 'learning_rate': 1.3692663858785126, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:52<14:48, 3.73s/it] 54%|█████▍ | 283/520 [17:56<14:42, 3.72s/it] {'loss': 5.5315, 'grad_norm': 9.76625192173396e-05, 'learning_rate': 1.359954587320574, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:56<14:42, 3.72s/it] 55%|█████▍ | 284/520 [17:59<14:37, 3.72s/it] {'loss': 5.9748, 'grad_norm': 8.319014234481894e-05, 'learning_rate': 1.3506482301062752, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:59<14:37, 3.72s/it] 55%|█████▍ | 285/520 [18:03<14:30, 3.70s/it] {'loss': 4.9897, 'grad_norm': 0.00010051793698493226, 'learning_rate': 1.3413476758260936, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:03<14:30, 3.70s/it] 55%|█████▌ | 286/520 [18:07<14:24, 3.70s/it] {'loss': 4.7032, 'grad_norm': 8.73836729288533e-05, 'learning_rate': 1.3320532858450382, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:07<14:24, 3.70s/it] 55%|█████▌ | 287/520 [18:10<14:19, 3.69s/it] {'loss': 5.0607, 'grad_norm': 0.0001221232090723763, 'learning_rate': 1.3227654212886109, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:10<14:19, 3.69s/it] 55%|█████▌ | 288/520 [18:14<14:22, 3.72s/it] {'loss': 5.8505, 'grad_norm': 0.00012322353993549513, 'learning_rate': 1.3134844430287727, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:14<14:22, 3.72s/it] 56%|█████▌ | 289/520 [18:18<14:18, 3.71s/it] {'loss': 4.9214, 'grad_norm': 0.0001449712889913552, 'learning_rate': 1.3042107116699229, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:18<14:18, 3.71s/it] 56%|█████▌ | 290/520 [18:22<14:11, 3.70s/it] {'loss': 4.6913, 'grad_norm': 7.510975116407326e-05, 'learning_rate': 1.2949445875348902, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:22<14:11, 3.70s/it] 56%|█████▌ | 291/520 [18:25<14:11, 3.72s/it] {'loss': 4.7852, 'grad_norm': 0.00012356219771798667, 'learning_rate': 1.2856864306509301, 'epoch': 0.56} + 56%|█████▌ | 291/520 
[18:25<14:11, 3.72s/it] 56%|█████▌ | 292/520 [18:29<14:03, 3.70s/it] {'loss': 5.3783, 'grad_norm': 6.926075621170698e-05, 'learning_rate': 1.2764366007357382, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:29<14:03, 3.70s/it] 56%|█████▋ | 293/520 [18:33<13:57, 3.69s/it] {'loss': 4.9503, 'grad_norm': 8.853165425525929e-05, 'learning_rate': 1.2671954571834725, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:33<13:57, 3.69s/it] 57%|█████▋ | 294/520 [18:36<13:54, 3.69s/it] {'loss': 5.2022, 'grad_norm': 9.212046368625896e-05, 'learning_rate': 1.25796335905079, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:36<13:54, 3.69s/it] 57%|█████▋ | 295/520 [18:40<14:02, 3.75s/it] {'loss': 6.2195, 'grad_norm': 0.00010646669515733021, 'learning_rate': 1.2487406650428956, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:40<14:02, 3.75s/it] 57%|█████▋ | 296/520 [18:44<14:04, 3.77s/it] {'loss': 4.6209, 'grad_norm': 7.939086665486758e-05, 'learning_rate': 1.2395277334996044, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:44<14:04, 3.77s/it] 57%|█████▋ | 297/520 [18:48<13:51, 3.73s/it] {'loss': 5.3697, 'grad_norm': 6.263015083289487e-05, 'learning_rate': 1.230324922381421, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:48<13:51, 3.73s/it] 57%|█████▋ | 298/520 [18:51<13:40, 3.69s/it] {'loss': 5.1586, 'grad_norm': 0.000118211110426385, 'learning_rate': 1.2211325892556282, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:51<13:40, 3.69s/it] 57%|█████▊ | 299/520 [18:55<13:35, 3.69s/it] {'loss': 6.1639, 'grad_norm': 7.343425743690732e-05, 'learning_rate': 1.2119510912823959, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:55<13:35, 3.69s/it] 58%|█████▊ | 300/520 [18:59<13:32, 3.69s/it] {'loss': 5.4664, 'grad_norm': 8.60420389893804e-05, 'learning_rate': 1.202780785200904, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:59<13:32, 3.69s/it] 58%|█████▊ | 301/520 [19:02<13:29, 3.70s/it] {'loss': 5.2027, 'grad_norm': 8.625703689269021e-05, 'learning_rate': 1.1936220273154796, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:02<13:29, 3.70s/it] 58%|█████▊ | 302/520 [19:06<13:29, 3.71s/it] {'loss': 6.1928, 'grad_norm': 7.673958002409898e-05, 'learning_rate': 1.1844751734817565, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:06<13:29, 3.71s/it] 58%|█████▊ | 303/520 [19:10<13:26, 3.72s/it] {'loss': 4.9271, 'grad_norm': 0.00010822999439124567, 'learning_rate': 1.1753405790928457, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:10<13:26, 3.72s/it] 58%|█████▊ | 304/520 [19:14<13:22, 3.71s/it] {'loss': 5.8569, 'grad_norm': 0.00017144835993748896, 'learning_rate': 1.1662185990655285, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:14<13:22, 3.71s/it] 59%|█████▊ | 305/520 [19:17<13:14, 3.70s/it] {'loss': 5.6944, 'grad_norm': 6.654451586273701e-05, 'learning_rate': 1.157109587826466, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:17<13:14, 3.70s/it] 59%|█████▉ | 306/520 [19:21<13:09, 3.69s/it] {'loss': 5.4307, 'grad_norm': 9.410982618914985e-05, 'learning_rate': 1.1480138992984275, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:21<13:09, 3.69s/it] 59%|█████▉ | 307/520 [19:25<13:32, 3.81s/it] {'loss': 5.0615, 'grad_norm': 9.402811974240836e-05, 'learning_rate': 1.1389318868865408, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:25<13:32, 3.81s/it] 59%|█████▉ | 308/520 [19:29<13:18, 3.77s/it] {'loss': 5.3465, 'grad_norm': 9.822832930826564e-05, 'learning_rate': 1.1298639034645594, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:29<13:18, 3.77s/it] 59%|█████▉ | 309/520 [19:32<13:07, 3.73s/it] {'loss': 4.996, 'grad_norm': 9.38718582282436e-05, 'learning_rate': 1.1208103013611534, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:32<13:07, 
3.73s/it] 60%|█████▉ | 310/520 [19:36<13:05, 3.74s/it] {'loss': 5.0448, 'grad_norm': 6.933082742011345e-05, 'learning_rate': 1.1117714323462187, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:36<13:05, 3.74s/it] 60%|█████▉ | 311/520 [19:40<12:54, 3.71s/it] {'loss': 5.0712, 'grad_norm': 6.906940867454897e-05, 'learning_rate': 1.1027476476172091, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:40<12:54, 3.71s/it] 60%|██████ | 312/520 [19:43<12:47, 3.69s/it] {'loss': 4.8503, 'grad_norm': 7.474678001894792e-05, 'learning_rate': 1.0937392977854925, 'epoch': 0.6} + 60%|██████ | 312/520 [19:43<12:47, 3.69s/it] 60%|██████ | 313/520 [19:47<12:42, 3.68s/it] {'loss': 4.3719, 'grad_norm': 7.223776426499881e-05, 'learning_rate': 1.084746732862726, 'epoch': 0.6} + 60%|██████ | 313/520 [19:47<12:42, 3.68s/it] 60%|██████ | 314/520 [19:51<13:03, 3.81s/it] {'loss': 5.085, 'grad_norm': 9.78455087684311e-05, 'learning_rate': 1.0757703022472587, 'epoch': 0.6} + 60%|██████ | 314/520 [19:51<13:03, 3.81s/it] 61%|██████ | 315/520 [19:55<12:55, 3.78s/it] {'loss': 6.2322, 'grad_norm': 8.66548045131287e-05, 'learning_rate': 1.0668103547105554, 'epoch': 0.61} + 61%|██████ | 315/520 [19:55<12:55, 3.78s/it] 61%|██████ | 316/520 [19:59<13:12, 3.88s/it] {'loss': 4.9114, 'grad_norm': 8.693224785312091e-05, 'learning_rate': 1.0578672383836436, 'epoch': 0.61} + 61%|██████ | 316/520 [19:59<13:12, 3.88s/it] 61%|██████ | 317/520 [20:03<12:54, 3.81s/it] {'loss': 4.4374, 'grad_norm': 0.00014944578383903578, 'learning_rate': 1.0489413007435906, 'epoch': 0.61} + 61%|██████ | 317/520 [20:03<12:54, 3.81s/it] 61%|██████ | 318/520 [20:06<12:43, 3.78s/it] {'loss': 5.584, 'grad_norm': 0.00015741529287103653, 'learning_rate': 1.0400328885999988, 'epoch': 0.61} + 61%|██████ | 318/520 [20:06<12:43, 3.78s/it] 61%|██████▏ | 319/520 [20:10<13:00, 3.88s/it] {'loss': 4.7785, 'grad_norm': 6.779948463836935e-05, 'learning_rate': 1.0311423480815334, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:10<13:00, 3.88s/it] 62%|██████▏ | 320/520 [20:14<12:42, 3.81s/it] {'loss': 5.0005, 'grad_norm': 0.00012208126321420245, 'learning_rate': 1.0222700246224736, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:14<12:42, 3.81s/it] 62%|██████▏ | 321/520 [20:18<12:33, 3.79s/it] {'loss': 5.1927, 'grad_norm': 6.694013703864563e-05, 'learning_rate': 1.0134162629492895, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:18<12:33, 3.79s/it] 62%|██████▏ | 322/520 [20:21<12:22, 3.75s/it] {'loss': 5.9237, 'grad_norm': 0.0001279187219579817, 'learning_rate': 1.0045814070672499, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:21<12:22, 3.75s/it] 62%|██████▏ | 323/520 [20:25<12:17, 3.74s/it] {'loss': 6.0376, 'grad_norm': 0.00014133127339781068, 'learning_rate': 0.9957658002470542, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:25<12:17, 3.74s/it] 62%|██████▏ | 324/520 [20:29<12:08, 3.72s/it] {'loss': 5.0033, 'grad_norm': 5.4099696612322485e-05, 'learning_rate': 0.986969785011497, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:29<12:08, 3.72s/it] 62%|██████▎ | 325/520 [20:32<12:02, 3.70s/it] {'loss': 5.2737, 'grad_norm': 7.74811992670672e-05, 'learning_rate': 0.978193703122159, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:32<12:02, 3.70s/it] 63%|██████▎ | 326/520 [20:36<11:58, 3.70s/it] {'loss': 5.2392, 'grad_norm': 0.00010143165017724657, 'learning_rate': 0.9694378955661279, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:36<11:58, 3.70s/it] 63%|██████▎ | 327/520 [20:40<11:52, 3.69s/it] {'loss': 6.1315, 'grad_norm': 9.2218284512734e-05, 'learning_rate': 0.9607027025427487, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:40<11:52, 
3.69s/it] 63%|██████▎ | 328/520 [20:44<11:49, 3.70s/it] {'loss': 5.4775, 'grad_norm': 0.0001520740640318299, 'learning_rate': 0.9519884634504074, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:44<11:49, 3.70s/it] 63%|██████▎ | 329/520 [20:47<11:47, 3.71s/it] {'loss': 4.5995, 'grad_norm': 0.00019619484194496512, 'learning_rate': 0.9432955168733431, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:47<11:47, 3.71s/it] 63%|██████▎ | 330/520 [20:51<11:45, 3.71s/it] {'loss': 5.1112, 'grad_norm': 0.00013269221128738164, 'learning_rate': 0.9346242005684922, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:51<11:45, 3.71s/it] 64%|██████▎ | 331/520 [20:55<11:44, 3.73s/it] {'loss': 5.093, 'grad_norm': 8.479658570716824e-05, 'learning_rate': 0.9259748514523654, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:55<11:44, 3.73s/it] 64%|██████▍ | 332/520 [20:58<11:38, 3.71s/it] {'loss': 6.1275, 'grad_norm': 8.665602219304382e-05, 'learning_rate': 0.917347805587958, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:58<11:38, 3.71s/it] 64%|██████▍ | 333/520 [21:02<11:32, 3.70s/it] {'loss': 5.6551, 'grad_norm': 5.867298530353743e-05, 'learning_rate': 0.9087433981716911, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:02<11:32, 3.70s/it] 64%|██████▍ | 334/520 [21:06<11:31, 3.72s/it] {'loss': 5.007, 'grad_norm': 7.460699239108669e-05, 'learning_rate': 0.9001619635203888, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:06<11:31, 3.72s/it] 64%|██████▍ | 335/520 [21:10<11:25, 3.71s/it] {'loss': 5.1224, 'grad_norm': 0.00010244078448567079, 'learning_rate': 0.8916038350582877, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:10<11:25, 3.71s/it] 65%|██████▍ | 336/520 [21:13<11:22, 3.71s/it] {'loss': 4.9368, 'grad_norm': 0.0001103820414545449, 'learning_rate': 0.883069345304083, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:13<11:22, 3.71s/it] 65%|██████▍ | 337/520 [21:17<11:19, 3.72s/it] {'loss': 5.0056, 'grad_norm': 0.00011561546555512764, 'learning_rate': 0.8745588258580084, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:17<11:19, 3.72s/it] 65%|██████▌ | 338/520 [21:21<11:13, 3.70s/it] {'loss': 5.0287, 'grad_norm': 0.00010472077283492976, 'learning_rate': 0.8660726073889511, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:21<11:13, 3.70s/it] 65%|██████▌ | 339/520 [21:24<11:08, 3.69s/it] {'loss': 5.2851, 'grad_norm': 6.673300988285618e-05, 'learning_rate': 0.8576110196216057, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:24<11:08, 3.69s/it] 65%|██████▌ | 340/520 [21:28<11:06, 3.70s/it] {'loss': 4.8555, 'grad_norm': 9.388969391895137e-05, 'learning_rate': 0.8491743913236629, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:28<11:06, 3.70s/it] 66%|██████▌ | 341/520 [21:32<11:03, 3.71s/it] {'loss': 5.088, 'grad_norm': 7.699653185957886e-05, 'learning_rate': 0.8407630502930323, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:32<11:03, 3.71s/it] 66%|██████▌ | 342/520 [21:35<10:58, 3.70s/it] {'loss': 6.3537, 'grad_norm': 8.010257566866785e-05, 'learning_rate': 0.8323773233451114, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:35<10:58, 3.70s/it] 66%|██████▌ | 343/520 [21:39<10:52, 3.69s/it] {'loss': 5.9605, 'grad_norm': 7.344042565208499e-05, 'learning_rate': 0.8240175363000819, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:39<10:52, 3.69s/it] 66%|██████▌ | 344/520 [21:43<10:50, 3.70s/it] {'loss': 4.7105, 'grad_norm': 0.00014187292310170724, 'learning_rate': 0.8156840139702555, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:43<10:50, 3.70s/it] 66%|██████▋ | 345/520 [21:47<10:48, 3.71s/it] {'loss': 5.1676, 'grad_norm': 0.0001458048640313294, 'learning_rate': 0.8073770801474495, 'epoch': 0.66} + 
66%|██████▋ | 345/520 [21:47<10:48, 3.71s/it] 67%|██████▋ | 346/520 [21:50<10:42, 3.69s/it] {'loss': 6.0556, 'grad_norm': 9.358179478034329e-05, 'learning_rate': 0.799097057590407, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:50<10:42, 3.69s/it] 67%|██████▋ | 347/520 [21:54<10:42, 3.71s/it] {'loss': 4.5095, 'grad_norm': 9.715023626646546e-05, 'learning_rate': 0.7908442680122597, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:54<10:42, 3.71s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:58<10:36, 3.70s/it] {'loss': 5.2461, 'grad_norm': 0.0001446897072760523, 'learning_rate': 0.7826190320680231, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:58<10:36, 3.70s/it] 67%|██████▋ | 349/520 [22:01<10:31, 3.69s/it] {'loss': 5.4162, 'grad_norm': 0.00015603762090753788, 'learning_rate': 0.7744216693421403, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:01<10:31, 3.69s/it] 67%|██████▋ | 350/520 [22:05<10:27, 3.69s/it] {'loss': 5.1123, 'grad_norm': 7.950609469945827e-05, 'learning_rate': 0.7662524983360666, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:05<10:27, 3.69s/it] 68%|██████▊ | 351/520 [22:09<10:25, 3.70s/it] {'loss': 4.7003, 'grad_norm': 9.801441853517031e-05, 'learning_rate': 0.7581118364558889, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:09<10:25, 3.70s/it] 68%|██████▊ | 352/520 [22:13<10:25, 3.72s/it] {'loss': 5.1969, 'grad_norm': 0.00018083912598772483, 'learning_rate': 0.7500000000000003, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:13<10:25, 3.72s/it] 68%|██████▊ | 353/520 [22:16<10:30, 3.78s/it] {'loss': 5.6518, 'grad_norm': 8.078748573425005e-05, 'learning_rate': 0.7419173041468043, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:16<10:30, 3.78s/it] 68%|██████▊ | 354/520 [22:20<10:34, 3.82s/it] {'loss': 6.1693, 'grad_norm': 5.710344744914135e-05, 'learning_rate': 0.733864062942472, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:20<10:34, 3.82s/it] 68%|██████▊ | 355/520 [22:24<10:31, 3.83s/it] {'loss': 4.8882, 'grad_norm': 0.00019369532798370264, 'learning_rate': 0.7258405892887398, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:24<10:31, 3.83s/it] 68%|██████▊ | 356/520 [22:28<10:20, 3.79s/it] {'loss': 5.0401, 'grad_norm': 8.521453616083261e-05, 'learning_rate': 0.717847194930753, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:28<10:20, 3.79s/it] 69%|██████▊ | 357/520 [22:32<10:12, 3.76s/it] {'loss': 4.5765, 'grad_norm': 0.00012252980269743594, 'learning_rate': 0.7098841904449489, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:32<10:12, 3.76s/it] 69%|██████▉ | 358/520 [22:35<10:05, 3.74s/it] {'loss': 4.8733, 'grad_norm': 0.00010689427801085159, 'learning_rate': 0.7019518852269953, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:35<10:05, 3.74s/it] 69%|██████▉ | 359/520 [22:39<09:58, 3.72s/it] {'loss': 5.9116, 'grad_norm': 8.481194627055016e-05, 'learning_rate': 0.694050587479764, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:39<09:58, 3.72s/it] 69%|██████▉ | 360/520 [22:43<09:53, 3.71s/it] {'loss': 6.18, 'grad_norm': 0.00010580673677568922, 'learning_rate': 0.686180604201361, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:43<09:53, 3.71s/it] 69%|██████▉ | 361/520 [22:46<09:51, 3.72s/it] {'loss': 6.0489, 'grad_norm': 0.00010603220109375796, 'learning_rate': 0.6783422411731932, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:46<09:51, 3.72s/it] 70%|██████▉ | 362/520 [22:50<09:45, 3.71s/it] {'loss': 4.834, 'grad_norm': 0.00017333363312669686, 'learning_rate': 0.6705358029480908, 
'epoch': 0.7} + 70%|██████▉ | 362/520 [22:50<09:45, 3.71s/it] 70%|██████▉ | 363/520 [22:54<09:41, 3.70s/it] {'loss': 5.2921, 'grad_norm': 0.0001327751414302482, 'learning_rate': 0.6627615928384742, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:54<09:41, 3.70s/it] 70%|███████ | 364/520 [22:58<09:39, 3.72s/it] {'loss': 6.1077, 'grad_norm': 8.27494641874967e-05, 'learning_rate': 0.6550199129045668, 'epoch': 0.7} + 70%|███████ | 364/520 [22:58<09:39, 3.72s/it] 70%|███████ | 365/520 [23:01<09:36, 3.72s/it] {'loss': 5.4207, 'grad_norm': 8.52301574186699e-05, 'learning_rate': 0.6473110639426617, 'epoch': 0.7} + 70%|███████ | 365/520 [23:01<09:36, 3.72s/it] 70%|███████ | 366/520 [23:05<09:34, 3.73s/it] {'loss': 5.1968, 'grad_norm': 8.325417237855545e-05, 'learning_rate': 0.6396353454734313, 'epoch': 0.7} + 70%|███████ | 366/520 [23:05<09:34, 3.73s/it] 71%|███████ | 367/520 [23:09<09:30, 3.73s/it] {'loss': 5.3491, 'grad_norm': 0.00010329811888266181, 'learning_rate': 0.6319930557302914, 'epoch': 0.71} + 71%|███████ | 367/520 [23:09<09:30, 3.73s/it] 71%|███████ | 368/520 [23:12<09:25, 3.72s/it] {'loss': 4.7283, 'grad_norm': 0.000164935672970694, 'learning_rate': 0.6243844916478156, 'epoch': 0.71} + 71%|███████ | 368/520 [23:12<09:25, 3.72s/it] 71%|███████ | 369/520 [23:16<09:24, 3.74s/it] {'loss': 5.6204, 'grad_norm': 0.00012343670423500645, 'learning_rate': 0.616809948850193, 'epoch': 0.71} + 71%|███████ | 369/520 [23:16<09:24, 3.74s/it] 71%|███████ | 370/520 [23:20<09:18, 3.73s/it] {'loss': 4.9603, 'grad_norm': 9.484152032479243e-05, 'learning_rate': 0.6092697216397478, 'epoch': 0.71} + 71%|███████ | 370/520 [23:20<09:18, 3.73s/it] 71%|███████▏ | 371/520 [23:24<09:14, 3.72s/it] {'loss': 5.1191, 'grad_norm': 0.00015888539270391867, 'learning_rate': 0.6017641029854996, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:24<09:14, 3.72s/it] 72%|███████▏ | 372/520 [23:27<09:09, 3.71s/it] {'loss': 6.2329, 'grad_norm': 0.00011778697730741558, 'learning_rate': 0.5942933845117836, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:27<09:09, 3.71s/it] 72%|███████▏ | 373/520 [23:31<09:04, 3.71s/it] {'loss': 5.9462, 'grad_norm': 0.00018764892070279724, 'learning_rate': 0.586857856486919, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:31<09:04, 3.71s/it] 72%|███████▏ | 374/520 [23:35<09:00, 3.70s/it] {'loss': 5.0542, 'grad_norm': 0.00015640564559215344, 'learning_rate': 0.5794578078119291, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:35<09:00, 3.70s/it] 72%|███████▏ | 375/520 [23:38<08:56, 3.70s/it] {'loss': 4.6786, 'grad_norm': 0.0001807390058292692, 'learning_rate': 0.5720935260093177, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:38<08:56, 3.70s/it] 72%|███████▏ | 376/520 [23:42<08:52, 3.70s/it] {'loss': 5.0716, 'grad_norm': 0.00019882468098994974, 'learning_rate': 0.5647652972118997, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:42<08:52, 3.70s/it] 72%|███████▎ | 377/520 [23:46<08:50, 3.71s/it] {'loss': 5.1124, 'grad_norm': 0.0002340232753743926, 'learning_rate': 0.5574734061516791, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:46<08:50, 3.71s/it] 73%|███████▎ | 378/520 [23:50<08:48, 3.72s/it] {'loss': 5.4444, 'grad_norm': 0.0001120158793474903, 'learning_rate': 0.5502181361487904, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:50<08:48, 3.72s/it] 73%|███████▎ | 379/520 [23:53<08:47, 3.74s/it] {'loss': 5.3695, 'grad_norm': 0.00013015177564703419, 'learning_rate': 0.5429997691004873, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:53<08:47, 3.74s/it] 73%|███████▎ | 380/520 [23:57<08:41, 3.73s/it] {'loss': 6.1192, 'grad_norm': 
9.123315818831265e-05, 'learning_rate': 0.5358185854701909, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:57<08:41, 3.73s/it] 73%|███████▎ | 381/520 [24:01<08:34, 3.70s/it] {'loss': 5.2239, 'grad_norm': 9.312265433273792e-05, 'learning_rate': 0.5286748642765946, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:01<08:34, 3.70s/it] 73%|███████▎ | 382/520 [24:04<08:30, 3.70s/it] {'loss': 6.1145, 'grad_norm': 6.634449378076958e-05, 'learning_rate': 0.5215688830828187, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:04<08:30, 3.70s/it] 74%|███████▎ | 383/520 [24:08<08:25, 3.69s/it] {'loss': 4.7958, 'grad_norm': 0.00015375067522609763, 'learning_rate': 0.5145009179856295, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:08<08:25, 3.69s/it] 74%|███████▍ | 384/520 [24:12<08:28, 3.74s/it] {'loss': 7.1602, 'grad_norm': 7.575021257366733e-05, 'learning_rate': 0.5074712436047113, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:12<08:28, 3.74s/it] 74%|███████▍ | 385/520 [24:16<08:23, 3.73s/it] {'loss': 5.1685, 'grad_norm': 0.00015285409971348853, 'learning_rate': 0.5004801330719941, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:16<08:23, 3.73s/it] 74%|███████▍ | 386/520 [24:19<08:17, 3.72s/it] {'loss': 4.5976, 'grad_norm': 0.00012070464368159842, 'learning_rate': 0.4935278580210451, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:19<08:17, 3.72s/it] 74%|███████▍ | 387/520 [24:23<08:12, 3.71s/it] {'loss': 6.5708, 'grad_norm': 0.0001091950744025552, 'learning_rate': 0.48661468857650964, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:23<08:12, 3.71s/it] 75%|███████▍ | 388/520 [24:27<08:17, 3.77s/it] {'loss': 4.7134, 'grad_norm': 0.00014403162495262568, 'learning_rate': 0.47974089334362063, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:27<08:17, 3.77s/it] 75%|███████▍ | 389/520 [24:31<08:16, 3.79s/it] {'loss': 5.2295, 'grad_norm': 0.00012929559230385763, 'learning_rate': 0.47290673939775973, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:31<08:16, 3.79s/it] 75%|███████▌ | 390/520 [24:35<08:14, 3.80s/it] {'loss': 5.1036, 'grad_norm': 9.640637947935518e-05, 'learning_rate': 0.46611249227407947, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:35<08:14, 3.80s/it] 75%|███████▌ | 391/520 [24:38<08:05, 3.76s/it] {'loss': 5.5336, 'grad_norm': 8.322490173370667e-05, 'learning_rate': 0.4593584159571875, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:38<08:05, 3.76s/it] 75%|███████▌ | 392/520 [24:42<07:58, 3.74s/it] {'loss': 4.8416, 'grad_norm': 0.00011436367522649913, 'learning_rate': 0.4526447728708909, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:42<07:58, 3.74s/it] 76%|███████▌ | 393/520 [24:46<07:52, 3.72s/it] {'loss': 5.762, 'grad_norm': 9.800020684283985e-05, 'learning_rate': 0.4459718238679963, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:46<07:52, 3.72s/it] 76%|███████▌ | 394/520 [24:49<07:44, 3.69s/it] {'loss': 5.0739, 'grad_norm': 7.644465200440134e-05, 'learning_rate': 0.4393398282201788, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:49<07:44, 3.69s/it] 76%|███████▌ | 395/520 [24:53<07:39, 3.67s/it] {'loss': 4.9758, 'grad_norm': 0.0001160476824536165, 'learning_rate': 0.4327490436079051, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:53<07:39, 3.67s/it] 76%|███████▌ | 396/520 [24:57<07:36, 3.68s/it] {'loss': 5.2515, 'grad_norm': 0.000137620904284533, 'learning_rate': 0.42619972611042234, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:57<07:36, 3.68s/it] 76%|███████▋ | 397/520 [25:00<07:32, 3.68s/it] {'loss': 5.2391, 'grad_norm': 0.00010618690229442489, 'learning_rate': 0.4196921301958104, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:00<07:32, 3.68s/it] 
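The 'learning_rate' values in these step logs trace a cosine decay. Below is a minimal sanity-check sketch, assuming the Hugging Face Trainer's cosine-with-warmup schedule (per the launch command: --lr_scheduler_type cosine, --warmup_ratio 0.03, 520 total steps) and a peak learning rate of 3.0, which is the peak these logged values are consistent with; it is not the training code itself.

```python
import math

# Assumed settings, read off the launch command and the logged values above.
PEAK_LR = 3.0
TOTAL_STEPS = 520
WARMUP_STEPS = math.ceil(TOTAL_STEPS * 0.03)  # 16

def cosine_lr(step: int) -> float:
    """Learning rate after `step` completed optimizer steps."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / max(1, WARMUP_STEPS)  # linear warmup
    progress = (step - WARMUP_STEPS) / max(1, TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(310))  # ~1.11177, matching the step-310 entry above
print(cosine_lr(352))  # 0.75, matching the step-352 entry above
print(cosine_lr(520))  # 0.0 at the final step
```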
77%|███████▋ | 398/520 [25:04<07:31, 3.70s/it] {'loss': 5.3573, 'grad_norm': 0.00011292244057303845, 'learning_rate': 0.413226508711091, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:04<07:31, 3.70s/it] 77%|███████▋ | 399/520 [25:08<07:26, 3.69s/it] {'loss': 5.8233, 'grad_norm': 0.00015688443455743627, 'learning_rate': 0.4068031128724075, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:08<07:26, 3.69s/it] 77%|███████▋ | 400/520 [25:11<07:26, 3.72s/it] {'loss': 5.7152, 'grad_norm': 0.00010828448566997492, 'learning_rate': 0.4004221922552608, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:11<07:26, 3.72s/it] 77%|███████▋ | 401/520 [25:15<07:28, 3.77s/it] {'loss': 4.2459, 'grad_norm': 0.00017826857806181086, 'learning_rate': 0.39408399478481404, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:15<07:28, 3.77s/it] 77%|███████▋ | 402/520 [25:19<07:31, 3.83s/it] {'loss': 4.6937, 'grad_norm': 0.00018669465580453763, 'learning_rate': 0.3877887667262599, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:19<07:31, 3.83s/it] 78%|███████▊ | 403/520 [25:23<07:32, 3.87s/it] {'loss': 4.9353, 'grad_norm': 0.00010885840458256602, 'learning_rate': 0.3815367526752516, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:23<07:32, 3.87s/it] 78%|███████▊ | 404/520 [25:27<07:31, 3.89s/it] {'loss': 4.7575, 'grad_norm': 0.000174637662850586, 'learning_rate': 0.3753281955483985, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:27<07:31, 3.89s/it] 78%|███████▊ | 405/520 [25:31<07:28, 3.90s/it] {'loss': 5.6487, 'grad_norm': 0.00012329377780672825, 'learning_rate': 0.36916333657383027, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:31<07:28, 3.90s/it] 78%|███████▊ | 406/520 [25:35<07:26, 3.91s/it] {'loss': 5.5652, 'grad_norm': 0.000237877857307698, 'learning_rate': 0.3630424152818203, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:35<07:26, 3.91s/it] 78%|███████▊ | 407/520 [25:39<07:21, 3.91s/it] {'loss': 5.6875, 'grad_norm': 0.00013439987734658725, 'learning_rate': 0.3569656694954838, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:39<07:21, 3.91s/it] 78%|███████▊ | 408/520 [25:43<07:17, 3.91s/it] {'loss': 4.9422, 'grad_norm': 0.00015387644727332442, 'learning_rate': 0.35093333532153315, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:43<07:17, 3.91s/it] 79%|███████▊ | 409/520 [25:47<07:13, 3.90s/it] {'loss': 5.6607, 'grad_norm': 0.0001600224964808386, 'learning_rate': 0.3449456471411058, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:47<07:13, 3.90s/it] 79%|███████▉ | 410/520 [25:51<07:08, 3.90s/it] {'loss': 4.4536, 'grad_norm': 0.00044641271349466914, 'learning_rate': 0.3390028376006589, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:51<07:08, 3.90s/it] 79%|███████▉ | 411/520 [25:55<07:05, 3.90s/it] {'loss': 5.2899, 'grad_norm': 0.000177231878357367, 'learning_rate': 0.33310513760292787, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:55<07:05, 3.90s/it] 79%|███████▉ | 412/520 [25:58<07:02, 3.91s/it] {'loss': 5.1793, 'grad_norm': 0.00021964056938518896, 'learning_rate': 0.3272527762979553, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:58<07:02, 3.91s/it] 79%|███████▉ | 413/520 [26:02<06:58, 3.91s/it] {'loss': 6.3597, 'grad_norm': 0.0002906364730544598, 'learning_rate': 0.32144598107418976, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:02<06:58, 3.91s/it] 80%|███████▉ | 414/520 [26:06<06:56, 3.93s/it] {'loss': 5.232, 'grad_norm': 0.0003484549625526129, 'learning_rate': 0.31568497754964703, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:06<06:56, 3.93s/it] 80%|███████▉ | 415/520 [26:10<06:48, 3.89s/it] {'loss': 4.5783, 'grad_norm': 0.00045534074892259716, 'learning_rate': 
0.3099699895631474, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:10<06:48, 3.89s/it] 80%|████████ | 416/520 [26:14<06:38, 3.83s/it] {'loss': 4.997, 'grad_norm': 0.00045127143496236365, 'learning_rate': 0.30430123916561674, 'epoch': 0.8} + 80%|████████ | 416/520 [26:14<06:38, 3.83s/it] 80%|████████ | 417/520 [26:18<06:29, 3.78s/it] {'loss': 5.1143, 'grad_norm': 0.0002087141853157493, 'learning_rate': 0.2986789466114582, 'epoch': 0.8} + 80%|████████ | 417/520 [26:18<06:29, 3.78s/it] 80%|████████ | 418/520 [26:21<06:22, 3.75s/it] {'loss': 5.3057, 'grad_norm': 0.00027798948350897366, 'learning_rate': 0.29310333034999747, 'epoch': 0.8} + 80%|████████ | 418/520 [26:21<06:22, 3.75s/it] 81%|████████ | 419/520 [26:25<06:16, 3.73s/it] {'loss': 5.3561, 'grad_norm': 0.0006076978270588919, 'learning_rate': 0.28757460701699217, 'epoch': 0.81} + 81%|████████ | 419/520 [26:25<06:16, 3.73s/it] 81%|████████ | 420/520 [26:29<06:12, 3.72s/it] {'loss': 4.7742, 'grad_norm': 0.0009814856518963546, 'learning_rate': 0.28209299142621524, 'epoch': 0.81} + 81%|████████ | 420/520 [26:29<06:12, 3.72s/it] 81%|████████ | 421/520 [26:32<06:06, 3.70s/it] {'loss': 4.5282, 'grad_norm': 0.001278681578444233, 'learning_rate': 0.27665869656110975, 'epoch': 0.81} + 81%|████████ | 421/520 [26:32<06:06, 3.70s/it] 81%|████████ | 422/520 [26:36<06:01, 3.69s/it] {'loss': 4.7755, 'grad_norm': 0.0007642222881771617, 'learning_rate': 0.27127193356651214, 'epoch': 0.81} + 81%|████████ | 422/520 [26:36<06:01, 3.69s/it] 81%|████████▏ | 423/520 [26:40<06:02, 3.74s/it] {'loss': 5.354, 'grad_norm': 0.0013372830759911048, 'learning_rate': 0.26593291174045, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:40<06:02, 3.74s/it] 82%|████████▏ | 424/520 [26:44<06:04, 3.80s/it] {'loss': 6.1783, 'grad_norm': 0.00034650066247368866, 'learning_rate': 0.260641838526008, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:44<06:04, 3.80s/it] 82%|████████▏ | 425/520 [26:47<05:58, 3.78s/it] {'loss': 4.7756, 'grad_norm': 0.0006306442834322125, 'learning_rate': 0.25539891950326876, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:47<05:58, 3.78s/it] 82%|████████▏ | 426/520 [26:51<05:52, 3.75s/it] {'loss': 5.5685, 'grad_norm': 0.00016371422182472777, 'learning_rate': 0.25020435838132676, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:51<05:52, 3.75s/it] 82%|████████▏ | 427/520 [26:55<05:47, 3.74s/it] {'loss': 4.5117, 'grad_norm': 0.000490329125507369, 'learning_rate': 0.24505835699037037, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:55<05:47, 3.74s/it] 82%|████████▏ | 428/520 [26:59<05:43, 3.73s/it] {'loss': 4.4718, 'grad_norm': 0.0002550729264227022, 'learning_rate': 0.2399611152738429, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:59<05:43, 3.73s/it] 82%|████████▎ | 429/520 [27:02<05:40, 3.75s/it] {'loss': 4.945, 'grad_norm': 0.0001895326307865199, 'learning_rate': 0.23491283128067175, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:02<05:40, 3.75s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:06<05:37, 3.75s/it] {'loss': 4.4393, 'grad_norm': 0.0001362444815662664, 'learning_rate': 0.2299137011575738, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:06<05:37, 3.75s/it] 83%|████████▎ | 431/520 [27:10<05:32, 3.73s/it] {'loss': 5.9779, 'grad_norm': 0.00014158229358157847, 'learning_rate': 0.22496391914143632, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:10<05:32, 3.73s/it] 83%|████████▎ | 432/520 [27:13<05:28, 3.73s/it] {'loss': 4.6826, 'grad_norm': 0.00014874867091381727, 'learning_rate': 0.2200636775517666, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:13<05:28, 3.73s/it] 83%|████████▎ | 433/520 [27:17<05:24, 3.73s/it] {'loss': 5.0425, 'grad_norm': 0.00014891233510977756, 'learning_rate': 0.215213166783223, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:17<05:24, 3.73s/it] 83%|████████▎ | 434/520 [27:21<05:19, 3.72s/it] {'loss': 4.1103, 'grad_norm': 0.0002193955915307122, 'learning_rate': 0.21041257529821455, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:21<05:19, 3.72s/it] 84%|████████▎ | 435/520 [27:25<05:16, 3.72s/it] {'loss': 5.291, 'grad_norm': 0.0001353278425099296, 'learning_rate': 0.20566208961958043, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:25<05:16, 3.72s/it] 84%|████████▍ | 436/520 [27:28<05:11, 3.71s/it] {'loss': 4.7111, 'grad_norm': 0.00023119707110131328, 'learning_rate': 0.20096189432334194, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:28<05:11, 3.71s/it] 84%|████████▍ | 437/520 [27:32<05:07, 3.71s/it] {'loss': 5.5669, 'grad_norm': 9.31421817505952e-05, 'learning_rate': 0.1963121720315304, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:32<05:07, 3.71s/it] 84%|████████▍ | 438/520 [27:36<05:03, 3.70s/it] {'loss': 4.3789, 'grad_norm': 0.00013953041608612997, 'learning_rate': 0.191713103405092, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:36<05:03, 3.70s/it] 84%|████████▍ | 439/520 [27:39<04:59, 3.70s/it] {'loss': 5.5772, 'grad_norm': 0.00011534089075580683, 'learning_rate': 0.18716486713686947, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:39<04:59, 3.70s/it] 85%|████████▍ | 440/520 [27:43<04:55, 3.69s/it] {'loss': 5.0701, 'grad_norm': 0.00017248169782005032, 'learning_rate': 0.182667639944657, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:43<04:55, 3.69s/it] 85%|████████▍ | 441/520 [27:47<04:53, 3.71s/it] {'loss': 5.8172, 'grad_norm': 9.228657653039104e-05, 'learning_rate': 0.1782215965643364, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:47<04:53, 3.71s/it] 85%|████████▌ | 442/520 [27:50<04:48, 3.70s/it] {'loss': 4.9648, 'grad_norm': 9.178706279527447e-05, 'learning_rate': 0.1738269097430855, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:50<04:48, 3.70s/it] 85%|████████▌ | 443/520 [27:54<04:45, 3.70s/it] {'loss': 5.0587, 'grad_norm': 8.773476401543395e-05, 'learning_rate': 0.16948375023266743, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:54<04:45, 3.70s/it] 85%|████████▌ | 444/520 [27:58<04:41, 3.70s/it] {'loss': 4.9113, 'grad_norm': 9.964060738172392e-05, 'learning_rate': 0.16519228678279718, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:58<04:41, 3.70s/it] 86%|████████▌ | 445/520 [28:02<04:37, 3.70s/it] {'loss': 4.6936, 'grad_norm': 7.502488421032587e-05, 'learning_rate': 0.16095268613458302, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:02<04:37, 3.70s/it] 86%|████████▌ | 446/520 [28:05<04:35, 3.72s/it] {'loss': 5.9973, 'grad_norm': 7.928966646952554e-05, 'learning_rate': 0.1567651130140486, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:05<04:35, 3.72s/it] 86%|████████▌ | 
447/520 [28:09<04:31, 3.71s/it] {'loss': 5.393, 'grad_norm': 0.00012122945993101774, 'learning_rate': 0.15262973012573394, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:09<04:31, 3.71s/it] 86%|████████▌ | 448/520 [28:13<04:26, 3.70s/it] {'loss': 4.8449, 'grad_norm': 7.223298793382131e-05, 'learning_rate': 0.14854669814637145, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:13<04:26, 3.70s/it] 86%|████████▋ | 449/520 [28:16<04:24, 3.72s/it] {'loss': 6.0421, 'grad_norm': 0.0001180323091881012, 'learning_rate': 0.14451617571864528, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:16<04:24, 3.72s/it] 87%|████████▋ | 450/520 [28:20<04:19, 3.71s/it] {'loss': 5.3174, 'grad_norm': 9.412124701081511e-05, 'learning_rate': 0.1405383194450251, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:20<04:19, 3.71s/it] 87%|████████▋ | 451/520 [28:24<04:17, 3.73s/it] {'loss': 5.3623, 'grad_norm': 8.268823446393261e-05, 'learning_rate': 0.1366132838816836, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:24<04:17, 3.73s/it] 87%|████████▋ | 452/520 [28:28<04:15, 3.75s/it] {'loss': 5.9485, 'grad_norm': 6.055948016285891e-05, 'learning_rate': 0.1327412215324903, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:28<04:15, 3.75s/it] 87%|████████▋ | 453/520 [28:32<04:11, 3.76s/it] {'loss': 6.0429, 'grad_norm': 9.626994865966585e-05, 'learning_rate': 0.1289222828430855, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:32<04:11, 3.76s/it] 87%|████████▋ | 454/520 [28:35<04:06, 3.74s/it] {'loss': 4.8211, 'grad_norm': 7.371711503017355e-05, 'learning_rate': 0.1251566161950357, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:35<04:06, 3.74s/it] 88%|████████▊ | 455/520 [28:39<04:02, 3.73s/it] {'loss': 5.1143, 'grad_norm': 7.396724154145886e-05, 'learning_rate': 0.12144436790006902, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:39<04:02, 3.73s/it] 88%|████████▊ | 456/520 [28:43<03:58, 3.72s/it] {'loss': 4.8194, 'grad_norm': 5.7279547420878774e-05, 'learning_rate': 0.1177856821943884, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:43<03:58, 3.72s/it] 88%|████████▊ | 457/520 [28:46<03:53, 3.71s/it] {'loss': 6.784, 'grad_norm': 0.00012136235404483949, 'learning_rate': 0.11418070123306989, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:46<03:53, 3.71s/it] 88%|████████▊ | 458/520 [28:50<03:50, 3.71s/it] {'loss': 5.4875, 'grad_norm': 0.00011006672350472358, 'learning_rate': 0.11062956508453703, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:50<03:50, 3.71s/it] 88%|████████▊ | 459/520 [28:54<03:49, 3.76s/it] {'loss': 5.2261, 'grad_norm': 8.576020613402165e-05, 'learning_rate': 0.10713241172511967, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:54<03:49, 3.76s/it] 88%|████████▊ | 460/520 [28:58<03:48, 3.81s/it] {'loss': 4.6504, 'grad_norm': 0.0001335500260043366, 'learning_rate': 0.1036893770336938, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:58<03:48, 3.81s/it] 89%|████████▊ | 461/520 [29:02<03:45, 3.82s/it] {'loss': 6.8507, 'grad_norm': 9.727866352155962e-05, 'learning_rate': 0.10030059478640024, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:02<03:45, 3.82s/it] 89%|████████▉ | 462/520 [29:05<03:41, 3.81s/it] {'loss': 6.4322, 'grad_norm': 0.00010809103685125899, 'learning_rate': 0.09696619665144901, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:05<03:41, 3.81s/it] 89%|████████▉ | 463/520 [29:09<03:34, 3.77s/it] {'loss': 4.8279, 'grad_norm': 0.00014993708493203674, 'learning_rate': 0.09368631218400136, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:09<03:34, 3.77s/it] 89%|████████▉ | 464/520 [29:13<03:30, 3.76s/it] {'loss': 5.5148, 'grad_norm': 6.301631272666945e-05, 
'learning_rate': 0.09046106882113752, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:13<03:30, 3.76s/it] 89%|████████▉ | 465/520 [29:17<03:25, 3.74s/it] {'loss': 5.7958, 'grad_norm': 8.62360035826058e-05, 'learning_rate': 0.0872905918769048, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:17<03:25, 3.74s/it] 90%|████████▉ | 466/520 [29:20<03:20, 3.71s/it] {'loss': 5.0616, 'grad_norm': 8.180451989298717e-05, 'learning_rate': 0.08417500453744864, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:20<03:20, 3.71s/it] 90%|████████▉ | 467/520 [29:24<03:16, 3.71s/it] {'loss': 5.9542, 'grad_norm': 9.16060020038979e-05, 'learning_rate': 0.08111442785622597, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:24<03:16, 3.71s/it] 90%|█████████ | 468/520 [29:28<03:12, 3.70s/it] {'loss': 5.3708, 'grad_norm': 6.531972735689672e-05, 'learning_rate': 0.07810898074930245, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:28<03:12, 3.70s/it] 90%|█████████ | 469/520 [29:31<03:08, 3.69s/it] {'loss': 5.478, 'grad_norm': 7.511913757924275e-05, 'learning_rate': 0.075158779990731, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:31<03:08, 3.69s/it] 90%|█████████ | 470/520 [29:35<03:04, 3.69s/it] {'loss': 4.9616, 'grad_norm': 5.383278614277714e-05, 'learning_rate': 0.07226394020801646, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:35<03:04, 3.69s/it] 91%|█████████ | 471/520 [29:39<03:00, 3.68s/it] {'loss': 5.6265, 'grad_norm': 0.00017552659848315942, 'learning_rate': 0.06942457387765977, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:39<03:00, 3.68s/it] 91%|█████████ | 472/520 [29:42<02:57, 3.71s/it] {'loss': 4.881, 'grad_norm': 0.00010167999353809541, 'learning_rate': 0.06664079132078882, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:42<02:57, 3.71s/it] 91%|█████████ | 473/520 [29:46<02:54, 3.72s/it] {'loss': 4.9767, 'grad_norm': 6.653604191025173e-05, 'learning_rate': 0.06391270069887289, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:46<02:54, 3.72s/it] 91%|█████████ | 474/520 [29:50<02:50, 3.71s/it] {'loss': 6.2398, 'grad_norm': 0.00011127837240925377, 'learning_rate': 0.061240408009518355, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:50<02:50, 3.71s/it] 91%|█████████▏| 475/520 [29:54<02:47, 3.73s/it] {'loss': 5.4057, 'grad_norm': 6.682900850236429e-05, 'learning_rate': 0.058624017082350766, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:54<02:47, 3.73s/it] 92%|█████████▏| 476/520 [29:57<02:43, 3.73s/it] {'loss': 5.0906, 'grad_norm': 6.683927311325844e-05, 'learning_rate': 0.056063629574981955, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:57<02:43, 3.73s/it] 92%|█████████▏| 477/520 [30:01<02:40, 3.72s/it] {'loss': 4.9134, 'grad_norm': 6.466438989535979e-05, 'learning_rate': 0.05355934496905851, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:01<02:40, 3.72s/it] 92%|█████████▏| 478/520 [30:05<02:36, 3.72s/it] {'loss': 4.7805, 'grad_norm': 0.00010279234806194022, 'learning_rate': 0.0511112605663977, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:05<02:36, 3.72s/it] 92%|█████████▏| 479/520 [30:08<02:32, 3.72s/it] {'loss': 6.2054, 'grad_norm': 6.771616141298899e-05, 'learning_rate': 0.048719471485205834, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:08<02:32, 3.72s/it] 92%|█████████▏| 480/520 [30:12<02:28, 3.72s/it] {'loss': 6.0823, 'grad_norm': 7.615743806923523e-05, 'learning_rate': 0.046384070656383225, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:12<02:28, 3.72s/it] 92%|█████████▎| 481/520 [30:16<02:25, 3.72s/it] {'loss': 5.9713, 'grad_norm': 0.00012941114616785293, 'learning_rate': 0.044105148819913564, 'epoch': 0.93} + 92%|█████████▎| 481/520 
[30:16<02:25, 3.72s/it] 93%|█████████▎| 482/520 [30:20<02:21, 3.72s/it] {'loss': 6.3702, 'grad_norm': 8.530651347420623e-05, 'learning_rate': 0.04188279452133825, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:20<02:21, 3.72s/it] 93%|█████████▎| 483/520 [30:23<02:17, 3.72s/it] {'loss': 5.3117, 'grad_norm': 9.202068965931997e-05, 'learning_rate': 0.039717094108314976, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:23<02:17, 3.72s/it] 93%|█████████▎| 484/520 [30:27<02:14, 3.73s/it] {'loss': 5.335, 'grad_norm': 0.0001381553562245004, 'learning_rate': 0.03760813172726457, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:27<02:14, 3.73s/it] 93%|█████████▎| 485/520 [30:31<02:11, 3.77s/it] {'loss': 4.8731, 'grad_norm': 0.00014777827042548967, 'learning_rate': 0.03555598932009996, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:31<02:11, 3.77s/it] 93%|█████████▎| 486/520 [30:35<02:08, 3.79s/it] {'loss': 5.3081, 'grad_norm': 7.475675742938711e-05, 'learning_rate': 0.03356074662104319, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:35<02:08, 3.79s/it] 94%|█████████▎| 487/520 [30:38<02:03, 3.76s/it] {'loss': 4.6346, 'grad_norm': 0.00015667305892737984, 'learning_rate': 0.03162248115352745, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:38<02:03, 3.76s/it] 94%|█████████▍| 488/520 [30:42<02:00, 3.76s/it] {'loss': 4.7865, 'grad_norm': 8.31191994659236e-05, 'learning_rate': 0.02974126822718426, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:42<02:00, 3.76s/it] 94%|█████████▍| 489/520 [30:46<01:56, 3.75s/it] {'loss': 5.8418, 'grad_norm': 5.392806637873788e-05, 'learning_rate': 0.027917180934918517, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:46<01:56, 3.75s/it] 94%|█████████▍| 490/520 [30:50<01:51, 3.72s/it] {'loss': 4.9697, 'grad_norm': 6.634525734638363e-05, 'learning_rate': 0.02615029015006759, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:50<01:51, 3.72s/it] 94%|█████████▍| 491/520 [30:53<01:48, 3.73s/it] {'loss': 4.9333, 'grad_norm': 0.00011497690382727015, 'learning_rate': 0.024440664523648015, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:53<01:48, 3.73s/it] 95%|█████████▍| 492/520 [30:57<01:44, 3.73s/it] {'loss': 5.2449, 'grad_norm': 8.584307518146233e-05, 'learning_rate': 0.02278837048168797, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:57<01:44, 3.73s/it] 95%|█████████▍| 493/520 [31:01<01:40, 3.72s/it] {'loss': 6.2809, 'grad_norm': 7.821099513222051e-05, 'learning_rate': 0.02119347222264617, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:01<01:40, 3.72s/it] 95%|█████████▌| 494/520 [31:04<01:36, 3.70s/it] {'loss': 5.1676, 'grad_norm': 0.00013457941979391202, 'learning_rate': 0.019656031714918365, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:04<01:36, 3.70s/it] 95%|█████████▌| 495/520 [31:08<01:32, 3.70s/it] {'loss': 4.5368, 'grad_norm': 0.0001244752321156134, 'learning_rate': 0.018176108694427928, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:08<01:32, 3.70s/it] 95%|█████████▌| 496/520 [31:12<01:28, 3.69s/it] {'loss': 4.7104, 'grad_norm': 0.00011438827983988644, 'learning_rate': 0.016753760662307216, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:12<01:28, 3.69s/it] 96%|█████████▌| 497/520 [31:15<01:24, 3.69s/it] {'loss': 5.7179, 'grad_norm': 6.746023355927844e-05, 'learning_rate': 0.01538904288266102, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:15<01:24, 3.69s/it] 96%|█████████▌| 498/520 [31:19<01:20, 3.68s/it] {'loss': 4.7713, 'grad_norm': 0.0001035128921689688, 'learning_rate': 0.014082008380420785, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:19<01:20, 3.68s/it] 96%|█████████▌| 499/520 [31:23<01:17, 3.71s/it] 
{'loss': 6.1693, 'grad_norm': 0.00012689032263208953, 'learning_rate': 0.012832707939284427, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:23<01:17, 3.71s/it] 96%|█████████▌| 500/520 [31:27<01:14, 3.70s/it] {'loss': 5.5096, 'grad_norm': 0.00012952417745190205, 'learning_rate': 0.011641190099741905, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:27<01:14, 3.70s/it] 96%|█████████▋| 501/520 [31:30<01:10, 3.69s/it] {'loss': 6.0277, 'grad_norm': 6.618585292643685e-05, 'learning_rate': 0.010507501157190569, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:30<01:10, 3.69s/it] 97%|█████████▋| 502/520 [31:34<01:06, 3.69s/it] {'loss': 5.0877, 'grad_norm': 8.232372562212789e-05, 'learning_rate': 0.009431685160136094, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:34<01:06, 3.69s/it] 97%|█████████▋| 503/520 [31:38<01:02, 3.69s/it] {'loss': 5.9147, 'grad_norm': 6.556385993507312e-05, 'learning_rate': 0.008413783908480355, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:38<01:02, 3.69s/it] 97%|█████████▋| 504/520 [31:41<00:59, 3.70s/it] {'loss': 5.2937, 'grad_norm': 0.00015032084952751397, 'learning_rate': 0.007453836951897885, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:41<00:59, 3.70s/it] 97%|█████████▋| 505/520 [31:45<00:55, 3.70s/it] {'loss': 5.2096, 'grad_norm': 7.580124134421377e-05, 'learning_rate': 0.00655188158829928, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:45<00:55, 3.70s/it] 97%|█████████▋| 506/520 [31:49<00:51, 3.69s/it] {'loss': 4.7273, 'grad_norm': 0.00019079193611025292, 'learning_rate': 0.005707952862381682, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:49<00:51, 3.69s/it] 98%|█████████▊| 507/520 [31:52<00:48, 3.70s/it] {'loss': 6.5895, 'grad_norm': 7.55299639620037e-05, 'learning_rate': 0.004922083564267377, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:52<00:48, 3.70s/it] 98%|█████████▊| 508/520 [31:56<00:44, 3.70s/it] {'loss': 5.4209, 'grad_norm': 6.293432577224989e-05, 'learning_rate': 0.004194304228229806, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:56<00:44, 3.70s/it] 98%|█████████▊| 509/520 [32:00<00:40, 3.71s/it] {'loss': 4.9342, 'grad_norm': 0.00010734654374008937, 'learning_rate': 0.0035246431315066884, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:00<00:40, 3.71s/it] 98%|█████████▊| 510/520 [32:04<00:37, 3.77s/it] {'loss': 4.9637, 'grad_norm': 8.090024364383185e-05, 'learning_rate': 0.0029131262932022284, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:04<00:37, 3.77s/it] 98%|█████████▊| 511/520 [32:08<00:34, 3.80s/it] {'loss': 5.0217, 'grad_norm': 9.868915053038764e-05, 'learning_rate': 0.002359777473275093, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:08<00:34, 3.80s/it] 98%|█████████▊| 512/520 [32:12<00:30, 3.82s/it] {'loss': 4.5982, 'grad_norm': 8.642606969748077e-05, 'learning_rate': 0.0018646181716164834, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:12<00:30, 3.82s/it] 99%|█████████▊| 513/520 [32:15<00:26, 3.83s/it] {'loss': 5.1573, 'grad_norm': 0.0001473793132937973, 'learning_rate': 0.0014276676272133026, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:15<00:26, 3.83s/it] 99%|█████████▉| 514/520 [32:19<00:23, 3.86s/it] {'loss': 5.139, 'grad_norm': 9.862362315150766e-05, 'learning_rate': 0.0010489428174020876, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:19<00:23, 3.86s/it] 99%|█████████▉| 515/520 [32:23<00:19, 3.88s/it] {'loss': 5.4851, 'grad_norm': 7.27834524366498e-05, 'learning_rate': 0.0007284584572085362, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:23<00:19, 3.88s/it] 99%|█████████▉| 516/520 [32:27<00:15, 3.89s/it] {'loss': 5.0242, 'grad_norm': 8.215508699292652e-05, 
'learning_rate': 0.0004662269987756318, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:27<00:15, 3.89s/it] 99%|█████████▉| 517/520 [32:31<00:11, 3.88s/it] {'loss': 6.1746, 'grad_norm': 5.575025495879048e-05, 'learning_rate': 0.00026225863088036316, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:31<00:11, 3.88s/it] 100%|█████████▉| 518/520 [32:35<00:07, 3.86s/it] {'loss': 5.0225, 'grad_norm': 6.665852242782806e-05, 'learning_rate': 0.00011656127853770792, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:35<00:07, 3.86s/it] 100%|█████████▉| 519/520 [32:39<00:03, 3.86s/it] {'loss': 5.9155, 'grad_norm': 8.549589171253219e-05, 'learning_rate': 2.9140602692712125e-05, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:39<00:03, 3.86s/it] 100%|██████████| 520/520 [32:43<00:00, 4.14s/it] {'loss': 6.3772, 'grad_norm': 0.00011153067970345816, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:44<00:00, 4.14s/it] {'train_runtime': 1964.0307, 'train_samples_per_second': 33.874, 'train_steps_per_second': 0.265, 'train_loss': 5.507999032048079, 'epoch': 1.0} + 100%|██████████| 520/520 [32:44<00:00, 4.14s/it] 100%|██████████| 520/520 [32:44<00:00, 3.78s/it] +[2025-10-10 07:24:21,029] [INFO] [launch.py:348:main] Process 1808017 exits successfully. +[2025-10-10 07:24:21,030] [INFO] [launch.py:348:main] Process 1808014 exits successfully. +[2025-10-10 07:24:22,032] [INFO] [launch.py:348:main] Process 1808019 exits successfully. +[2025-10-10 07:24:22,032] [INFO] [launch.py:348:main] Process 1808018 exits successfully. +[2025-10-10 07:24:22,033] [INFO] [launch.py:348:main] Process 1808013 exits successfully. +[2025-10-10 07:24:22,033] [INFO] [launch.py:348:main] Process 1808016 exits successfully. +[2025-10-10 07:24:22,034] [INFO] [launch.py:348:main] Process 1808015 exits successfully. +[2025-10-10 07:24:26,039] [INFO] [launch.py:348:main] Process 1808012 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251010_063715.log +Timestamp: 2025-10-10 07:24:28 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251010_095404.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251010_095404.log new file mode 100644 index 0000000000000000000000000000000000000000..84b98ec5016c6146dd04c73f293eb62ddf0d88cd --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251010_095404.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251010_095404.log +Timestamp: 2025-10-10 09:54:04 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
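A quick arithmetic check on the 'train_runtime' summary of the run that completed above. This is a hedged sketch, assuming the effective batch size implied by the launch command (4 per device x 8 ranks x 4 gradient-accumulation steps = 128):

```python
# Numbers copied from the summary line above.
runtime_s = 1964.0307
steps = 520
eff_batch = 4 * 8 * 4  # per_device_train_batch_size * dist_world_size * grad_accum

print(steps / runtime_s)              # ~0.2648 -> 'train_steps_per_second': 0.265
print(steps * eff_batch / runtime_s)  # ~33.89  -> 'train_samples_per_second': 33.874
# 33.874 * 1964.0307 is ~66,530 samples, i.e. --train_data_ratio 0.1 of the
# ~665k llava_v1_5_mix665k examples; the final step likely saw a partial batch,
# which explains the small gap from the 33.89 upper bound.
```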
+ import pynvml # type: ignore[import] +[2025-10-10 09:54:06,850] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:54:09,739] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 09:54:09,741] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 3e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 3e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
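The --world_info argument in the deepspeed.launcher.launch command above is base64-encoded JSON mapping each host to its local GPU ranks; decoding it reproduces the WORLD INFO DICT that the launcher prints just below:

```python
import base64
import json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} -> one node, eight local ranks
```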
+ import pynvml # type: ignore[import] +[2025-10-10 09:54:12,367] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 09:54:13,416] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 09:54:13,416] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 09:54:13,416] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 09:54:13,416] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 09:54:13,416] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 09:54:13,416] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 09:54:13,416] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 09:54:13,418] [INFO] [launch.py:253:main] process 1964832 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:54:13,420] [INFO]
[launch.py:253:main] process 1964833 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:54:13,422] [INFO] [launch.py:253:main] process 1964834 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:54:13,425] [INFO] [launch.py:253:main] process 1964835 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:54:13,427] [INFO] [launch.py:253:main] process 1964836 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', 
'--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:54:13,429] [INFO] [launch.py:253:main] process 1964837 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:54:13,431] [INFO] [launch.py:253:main] process 1964838 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 09:54:13,433] [INFO] [launch.py:253:main] process 1964839 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', 
'--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
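For reference, the effective optimization batch size is implied directly by the launcher flags above and the eight local ranks in --world_info. A minimal sketch in plain Python (values copied from the logged command line):

# Effective global batch size implied by the logged launcher flags.
# world_size comes from the 8 local ranks listed in --world_info.
per_device_train_batch_size = 4
gradient_accumulation_steps = 4
world_size = 8  # GPUs 0-7 on this worker

global_batch_size = per_device_train_batch_size * gradient_accumulation_steps * world_size
print(global_batch_size)  # 128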
+[2025-10-10 09:54:20,116] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:20,230] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:20,234] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:20,291] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:20,296] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:20,298] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:20,313] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:20,313] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 09:54:20,512] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 09:54:20,629] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 09:54:20,636] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 09:54:20,690] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 09:54:20,697] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 09:54:20,706] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 09:54:20,715] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 09:54:20,715] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 09:54:20,720] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
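The config dump above names the knobs that drive the mask-tuning run (mask_type 'soft', temperature 0.5, and, from the command line, init_mean 3.0). This log does not show how the training code consumes them, but a minimal sketch of the common differentiable-mask recipe these names suggest follows; the class and variable names here are illustrative, not from the repo:

import torch

# Hypothetical soft mask in the spirit of the logged flags:
#   --mask_type_* soft, --temperature_* 0.5, --init_mean_* 3.0
# A learnable score per weight is squashed by a temperature-scaled sigmoid;
# a lower temperature sharpens the mask toward 0/1.
class SoftMask(torch.nn.Module):
    def __init__(self, shape, init_mean=3.0, temperature=0.5):
        super().__init__()
        self.score = torch.nn.Parameter(torch.full(shape, init_mean))
        self.temperature = temperature

    def forward(self, weight):
        # sigmoid(3.0 / 0.5) ~= 0.998, so masks start out nearly fully on.
        mask = torch.sigmoid(self.score / self.temperature)
        return weight * mask  # masked weight used in the forward pass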
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
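The TinyLlavaConfig dump above pins the connector to connector_type "mlp2x_gelu" with vision_hidden_size 1152 and hidden_size 896. Assuming the usual LLaVA-style meaning of mlp2x_gelu (two linear layers with a GELU in between; the exact TinyLLaVA module is not shown in this log), a minimal sketch:

import torch.nn as nn

# Sketch of an mlp2x_gelu connector under the LLaVA-style convention:
# 1152-d SigLIP vision features (per the config above) are projected
# into the 896-d Qwen2.5-0.5B hidden size.
def build_connector(vision_hidden_size=1152, hidden_size=896):
    return nn.Sequential(
        nn.Linear(vision_hidden_size, hidden_size),
        nn.GELU(),
        nn.Linear(hidden_size, hidden_size),
    )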
+ywang29-vrdb-test1-worker-0:1964832:1964832 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1964832:1964832 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1964832:1964832 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1964832:1964832 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1964832:1964832 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1964832:1964832 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO ncclCommInitRank comm 0x557a0de1b710 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x35b9583704088f6a - Init START
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO comm 0x557a0de1b710 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL 
INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL 
INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p 
channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1964836:1966517 [4] NCCL INFO ncclCommInitRank comm 0x55aa34182480 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x35b9583704088f6a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964838:1966516 [6] NCCL INFO ncclCommInitRank comm 0x55e2e5cd46e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x35b9583704088f6a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1964839:1966514 [7] NCCL INFO ncclCommInitRank comm 0x55926aff5220 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x35b9583704088f6a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964837:1966513 [5] NCCL INFO ncclCommInitRank comm 0x564824db7e40 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x35b9583704088f6a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1964835:1966511 [3] NCCL INFO ncclCommInitRank comm 0x55bfe9ebf7d0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x35b9583704088f6a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964834:1966512 [2] NCCL INFO ncclCommInitRank comm 0x555f6a676c70 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x35b9583704088f6a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964832:1966510 [0] NCCL INFO ncclCommInitRank comm 0x557a0de1b710 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x35b9583704088f6a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1964833:1966515 [1] NCCL INFO ncclCommInitRank comm 0x561fdc55b5f0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x35b9583704088f6a - Init COMPLETE +[2025-10-10 09:55:07,240] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +[the same "Some weights of Qwen2ForCausalLM were not initialized ... and are newly initialized" warning, with an identical list of per-layer scores tensors and the same TRAIN advisory, is printed once per remaining rank; the verbatim duplicate copies are elided]
'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 
'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 
'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 09:55:08,963] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
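The "newly initialized" warning above is expected for this run rather than a problem: the mask-tuning recipe attaches a learnable `scores` tensor to every masked projection, and those tensors do not exist in the pretrain checkpoint, so Transformers reports them as freshly initialized. The actual `SupermaskLinearSparsity_SoftForward_Normal` implementation is not shown in this log; the sketch below is only a minimal illustration of the pattern, assuming a sigmoid soft mask and reusing the temperature (0.3) from the launch command and the score mean (3.0) logged further down. The class name and the init rule here are illustrative, not the repo's code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SupermaskLinearSketch(nn.Linear):
    """Illustrative stand-in for SupermaskLinearSparsity_SoftForward_Normal
    (the real class is not shown in this log). The frozen weight keeps its
    pretrained values; only the per-weight `scores` tensor trains."""

    def __init__(self, in_features, out_features, bias=True,
                 score_init=3.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # `scores` is absent from the pretrain checkpoint, which is why
        # from_pretrained() reports it as newly initialized.
        self.scores = nn.Parameter(torch.full_like(self.weight, score_init))
        self.temperature = temperature
        self.weight.requires_grad_(False)
        if self.bias is not None:
            self.bias.requires_grad_(False)

    def forward(self, x):
        # Soft forward: sigmoid(scores / T) in (0, 1) gates each weight.
        # With scores = 3.0 and T = 0.3, sigmoid(10) ~= 0.99995, so the
        # initial mask is effectively all-ones (cf. Mean=3.000000 below).
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```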
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init
language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-10 09:55:26,552 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-10 09:55:26,558 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 
114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 02/24 :
0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL 
INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL 
INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL 
INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL 
INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL 
INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1964837:1971446 [5] NCCL INFO ncclCommInitRank comm 
0x7fcee406abe0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc7d13e2fba1fa27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964838:1971450 [6] NCCL INFO ncclCommInitRank comm 0x7fd70806ab80 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc7d13e2fba1fa27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964834:1971447 [2] NCCL INFO ncclCommInitRank comm 0x7fb96006a2f0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc7d13e2fba1fa27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964839:1971451 [7] NCCL INFO ncclCommInitRank comm 0x7f751c06a470 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc7d13e2fba1fa27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964835:1971449 [3] NCCL INFO ncclCommInitRank comm 0x7f8a5806ac70 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc7d13e2fba1fa27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964833:1971448 [1] NCCL INFO ncclCommInitRank comm 0x7fbad406a8b0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc7d13e2fba1fa27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964836:1971452 [4] NCCL INFO ncclCommInitRank comm 0x7fa04006adc0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc7d13e2fba1fa27 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1964832:1971445 [0] NCCL INFO ncclCommInitRank comm 0x7f168006add0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc7d13e2fba1fa27 - Init COMPLETE + 0%| | 1/520 [00:30<4:23:50, 30.50s/it] {'loss': 2.0453, 'grad_norm': 0.0048354576355012, 'learning_rate': 0.01875, 'epoch': 0.0} + 0%| | 1/520 [00:30<4:23:50, 30.50s/it] 0%| | 2/520 [00:34<2:08:30, 14.88s/it] {'loss': 2.0549, 'grad_norm': 0.005249234391390439, 'learning_rate': 0.0375, 'epoch': 0.0} + 0%| | 2/520 [00:34<2:08:30, 14.88s/it] 1%| | 3/520 [00:38<1:25:02, 9.87s/it] {'loss': 2.1899, 'grad_norm': 0.00600694460822913, 'learning_rate': 0.056249999999999994, 'epoch': 0.01} + 1%| | 3/520 [00:38<1:25:02, 9.87s/it] 1%| | 4/520 [00:42<1:04:38, 7.52s/it] {'loss': 2.0656, 'grad_norm': 0.0049634926972154745, 'learning_rate': 0.075, 'epoch': 0.01} + 1%| | 4/520 [00:42<1:04:38, 7.52s/it] 1%| | 5/520 [00:46<53:27, 6.23s/it] {'loss': 1.7901, 'grad_norm': 0.00277607607046223, 'learning_rate': 0.09375, 'epoch': 0.01} + 1%| | 5/520 [00:46<53:27, 6.23s/it] 1%| | 6/520 [00:50<46:31, 5.43s/it] {'loss': 1.4321, 'grad_norm': 0.000654223945409454, 'learning_rate': 0.11249999999999999, 'epoch': 0.01} + 1%| | 6/520 [00:50<46:31, 5.43s/it] 1%|▏ | 7/520 [00:54<42:10, 4.93s/it] {'loss': 1.5217, 'grad_norm': 0.0007774864144197179, 'learning_rate': 0.13125, 'epoch': 0.01} + 1%|▏ | 7/520 [00:54<42:10, 4.93s/it] 2%|▏ | 8/520 [00:58<41:01, 4.81s/it] {'loss': 1.5408, 'grad_norm': 0.0005915547075007427, 'learning_rate': 0.15, 'epoch': 0.02} + 2%|▏ | 8/520 [00:58<41:01, 4.81s/it] 2%|▏ | 9/520 [01:02<40:00, 4.70s/it] {'loss': 1.5881, 'grad_norm': 0.0005222561430065138, 'learning_rate': 0.16874999999999998, 'epoch': 0.02} + 2%|▏ | 9/520 [01:03<40:00, 4.70s/it] 2%|▏ | 10/520 [01:06<37:55, 4.46s/it] {'loss': 1.4235, 'grad_norm': 0.0006167692497152493, 'learning_rate': 0.1875, 'epoch': 0.02} + 2%|▏ | 10/520 [01:06<37:55, 4.46s/it] 2%|▏ | 11/520 [01:11<38:14, 4.51s/it] {'loss': 1.4657, 'grad_norm': 0.0005976025284972201, 'learning_rate': 0.20625, 'epoch': 0.02} + 2%|▏ | 11/520 [01:11<38:14, 4.51s/it] 2%|▏ | 12/520 [01:15<36:35, 4.32s/it] {'loss': 1.3353, 'grad_norm': 0.000543825858370634, 'learning_rate': 0.22499999999999998, 'epoch': 0.02} + 2%|▏ | 12/520 [01:15<36:35, 4.32s/it][2025-10-10 09:56:51,778] [WARNING] [stage3.py:2069:step] 1 pytorch 
allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:20<37:08, 4.40s/it] {'loss': 1.4014, 'grad_norm': 0.0006535886601904906, 'learning_rate': 0.24375, 'epoch': 0.03} + 2%|▎ | 13/520 [01:20<37:08, 4.40s/it] 3%|▎ | 14/520 [01:23<35:48, 4.25s/it] {'loss': 1.4478, 'grad_norm': 0.0009290022871934679, 'learning_rate': 0.2625, 'epoch': 0.03} + 3%|▎ | 14/520 [01:23<35:48, 4.25s/it] 3%|▎ | 15/520 [01:27<34:49, 4.14s/it] {'loss': 1.3727, 'grad_norm': 0.0008381839016081213, 'learning_rate': 0.28125, 'epoch': 0.03} + 3%|▎ | 15/520 [01:27<34:49, 4.14s/it] 3%|▎ | 16/520 [01:31<34:04, 4.06s/it] {'loss': 1.3315, 'grad_norm': 0.001008727973314021, 'learning_rate': 0.3, 'epoch': 0.03} + 3%|▎ | 16/520 [01:31<34:04, 4.06s/it] 3%|▎ | 17/520 [01:35<33:41, 4.02s/it] {'loss': 1.4595, 'grad_norm': 0.001214552601872104, 'learning_rate': 0.2999970859397307, 'epoch': 0.03} + 3%|▎ | 17/520 [01:35<33:41, 4.02s/it] 3%|▎ | 18/520 [01:39<33:17, 3.98s/it] {'loss': 1.3164, 'grad_norm': 0.001366390260009809, 'learning_rate': 0.2999883438721462, 'epoch': 0.03} + 3%|▎ | 18/520 [01:39<33:17, 3.98s/it] 4%|▎ | 19/520 [01:43<33:06, 3.96s/it] {'loss': 1.3391, 'grad_norm': 0.0015554500794382402, 'learning_rate': 0.29997377413691195, 'epoch': 0.04} + 4%|▎ | 19/520 [01:43<33:06, 3.96s/it] 4%|▍ | 20/520 [01:47<32:50, 3.94s/it] {'loss': 1.3039, 'grad_norm': 0.001897936472948687, 'learning_rate': 0.29995337730012245, 'epoch': 0.04} + 4%|▍ | 20/520 [01:47<32:50, 3.94s/it] 4%|▍ | 21/520 [01:51<32:42, 3.93s/it] {'loss': 1.388, 'grad_norm': 0.003071852895147204, 'learning_rate': 0.2999271541542791, 'epoch': 0.04} + 4%|▍ | 21/520 [01:51<32:42, 3.93s/it] 4%|▍ | 22/520 [01:55<32:35, 3.93s/it] {'loss': 1.4802, 'grad_norm': 0.002385154475671038, 'learning_rate': 0.2998951057182598, 'epoch': 0.04} + 4%|▍ | 22/520 [01:55<32:35, 3.93s/it] 4%|▍ | 23/520 [01:59<32:30, 3.93s/it] {'loss': 1.4045, 'grad_norm': 0.001884046606399549, 'learning_rate': 0.2998572332372787, 'epoch': 0.04} + 4%|▍ | 23/520 [01:59<32:30, 3.93s/it] 5%|▍ | 24/520 [02:02<32:21, 3.91s/it] {'loss': 1.3394, 'grad_norm': 0.002067902541499183, 'learning_rate': 0.29981353818283835, 'epoch': 0.05} + 5%|▍ | 24/520 [02:02<32:21, 3.91s/it] 5%|▍ | 25/520 [02:06<32:14, 3.91s/it] {'loss': 1.4213, 'grad_norm': 0.0025263097970045275, 'learning_rate': 0.29976402225267246, 'epoch': 0.05} + 5%|▍ | 25/520 [02:06<32:14, 3.91s/it] 5%|▌ | 26/520 [02:10<32:10, 3.91s/it] {'loss': 1.3739, 'grad_norm': 0.0019729584726324713, 'learning_rate': 0.2997086873706798, 'epoch': 0.05} + 5%|▌ | 26/520 [02:10<32:10, 3.91s/it] 5%|▌ | 27/520 [02:14<32:03, 3.90s/it] {'loss': 1.3224, 'grad_norm': 0.0023838972071871553, 'learning_rate': 0.2996475356868493, 'epoch': 0.05} + 5%|▌ | 27/520 [02:14<32:03, 3.90s/it] 5%|▌ | 28/520 [02:18<31:58, 3.90s/it] {'loss': 1.3357, 'grad_norm': 0.0026527635421574793, 'learning_rate': 0.299580569577177, 'epoch': 0.05} + 5%|▌ | 28/520 [02:18<31:58, 3.90s/it] 6%|▌ | 29/520 [02:22<31:56, 3.90s/it] {'loss': 1.3564, 'grad_norm': 0.0021841559014431472, 'learning_rate': 0.2995077916435733, 'epoch': 0.06} + 6%|▌ | 29/520 [02:22<31:56, 3.90s/it] 6%|▌ | 30/520 [02:26<31:52, 3.90s/it] {'loss': 1.438, 'grad_norm': 0.0019350659731003936, 
+[... steps 30-49: one entry per step at ~3.7-3.9 s/it; loss oscillating between ~1.30 and ~1.57 ...]
+ 10%|▉ | 50/520 [03:40<28:55, 3.69s/it] {'loss': 1.4433, 'grad_norm': 0.0020702192210899344, 'learning_rate': 0.2966439253378957, 'epoch': 0.1}
+[... steps 51-99: loss ~1.21-1.56 ...]
+ 19%|█▉ | 100/520 [06:49<26:48, 3.83s/it] {'loss': 1.4022, 'grad_norm': 0.003220068185116982, 'learning_rate': 0.27990381056766583, 'epoch': 0.19}
+[... steps 101-149: loss ~1.19-1.45 ...]
+ 29%|██▉ | 150/520 [09:58<23:41, 3.84s/it] {'loss': 1.4214, 'grad_norm': 0.0007554587067034647, 'learning_rate': 0.2506472141978955, 'epoch': 0.29}
+[... steps 151-183: loss ~1.14-1.41 ...]
+ 35%|███▌ | 184/520 [12:04<20:42, 3.70s/it] {'loss': 1.1961, 'grad_norm': 0.0006552718789403436, 'learning_rate': 0.22499999999999998, 'epoch': 0.35}
+[... steps 185-198: loss ~1.18-1.35 ...]
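The logged learning_rate values are internally consistent with the configured schedule: --warmup_ratio 0.03 of 520 steps gives 16 warmup steps, a linear ramp to the observed 0.3 peak at step 16, then --lr_scheduler_type cosine decay; step 184 (one third of the way through the decay) logs exactly 0.225 and step 268 (halfway) logs exactly 0.15. A back-of-the-envelope check, as a sketch rather than the trainer's actual code:

```python
# Sketch: verify the warmup + cosine shape implied by the logged values.
import math

PEAK_LR = 0.3   # peak observed in the log at step 16
WARMUP = 16     # ~0.03 * 520 optimizer steps, from --warmup_ratio 0.03
TOTAL = 520     # total steps reported by the progress bar

def scheduled_lr(step: int) -> float:
    if step < WARMUP:
        return PEAK_LR * step / WARMUP                 # linear warmup
    progress = (step - WARMUP) / (TOTAL - WARMUP)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

# Spot checks against the log: 1 -> 0.01875, 184 -> 0.225, 268 -> 0.15, 352 -> 0.075
for s in (1, 16, 184, 268, 352):
    print(s, round(scheduled_lr(s), 6))
```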
0.0006285503401368488, 'learning_rate': 0.21254411741419924, 'epoch': 0.38} + 38%|███▊ | 199/520 [13:00<19:44, 3.69s/it] 38%|███▊ | 200/520 [13:04<19:40, 3.69s/it] {'loss': 1.2379, 'grad_norm': 0.0007026056065867105, 'learning_rate': 0.21169306546959177, 'epoch': 0.38} + 38%|███▊ | 200/520 [13:04<19:40, 3.69s/it] 39%|███▊ | 201/520 [13:07<19:35, 3.69s/it] {'loss': 1.2478, 'grad_norm': 0.0005876589897789897, 'learning_rate': 0.21083961649417127, 'epoch': 0.39} + 39%|███▊ | 201/520 [13:07<19:35, 3.69s/it] 39%|███▉ | 202/520 [13:11<19:34, 3.69s/it] {'loss': 1.217, 'grad_norm': 0.000681992417653471, 'learning_rate': 0.20998380364796113, 'epoch': 0.39} + 39%|███▉ | 202/520 [13:11<19:34, 3.69s/it] 39%|███▉ | 203/520 [13:15<19:33, 3.70s/it] {'loss': 1.2604, 'grad_norm': 0.0007151184872039712, 'learning_rate': 0.2091256601828309, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:15<19:33, 3.70s/it] 39%|███▉ | 204/520 [13:18<19:24, 3.69s/it] {'loss': 1.2741, 'grad_norm': 0.0006516456571464083, 'learning_rate': 0.2082652194412042, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:18<19:24, 3.69s/it] 39%|███▉ | 205/520 [13:22<19:24, 3.70s/it] {'loss': 1.2557, 'grad_norm': 0.0006453641692270767, 'learning_rate': 0.20740251485476346, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:22<19:24, 3.70s/it] 40%|███▉ | 206/520 [13:26<19:22, 3.70s/it] {'loss': 1.3173, 'grad_norm': 0.0006282561300653919, 'learning_rate': 0.20653757994315078, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:26<19:22, 3.70s/it] 40%|███▉ | 207/520 [13:29<19:17, 3.70s/it] {'loss': 1.2279, 'grad_norm': 0.0006760931540248166, 'learning_rate': 0.20567044831266568, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:29<19:17, 3.70s/it] 40%|████ | 208/520 [13:33<19:12, 3.69s/it] {'loss': 1.3018, 'grad_norm': 0.0006670413294779211, 'learning_rate': 0.2048011536549593, 'epoch': 0.4} + 40%|████ | 208/520 [13:33<19:12, 3.69s/it] 40%|████ | 209/520 [13:37<19:11, 3.70s/it] {'loss': 1.21, 'grad_norm': 0.0005962295560332697, 'learning_rate': 0.20392972974572512, 'epoch': 0.4} + 40%|████ | 209/520 [13:37<19:11, 3.70s/it] 40%|████ | 210/520 [13:40<19:04, 3.69s/it] {'loss': 1.2842, 'grad_norm': 0.0006210774243391739, 'learning_rate': 0.2030562104433872, 'epoch': 0.4} + 40%|████ | 210/520 [13:40<19:04, 3.69s/it] 41%|████ | 211/520 [13:44<19:09, 3.72s/it] {'loss': 1.2914, 'grad_norm': 0.0005633972309661352, 'learning_rate': 0.20218062968778405, 'epoch': 0.41} + 41%|████ | 211/520 [13:44<19:09, 3.72s/it] 41%|████ | 212/520 [13:48<19:00, 3.70s/it] {'loss': 1.2801, 'grad_norm': 0.0007131226092188257, 'learning_rate': 0.20130302149885032, 'epoch': 0.41} + 41%|████ | 212/520 [13:48<19:00, 3.70s/it] 41%|████ | 213/520 [13:52<18:57, 3.70s/it] {'loss': 1.2269, 'grad_norm': 0.0007751120973012457, 'learning_rate': 0.20042341997529464, 'epoch': 0.41} + 41%|████ | 213/520 [13:52<18:57, 3.70s/it] 41%|████ | 214/520 [13:55<18:56, 3.71s/it] {'loss': 1.2263, 'grad_norm': 0.0007646051033545685, 'learning_rate': 0.1995418592932751, 'epoch': 0.41} + 41%|████ | 214/520 [13:55<18:56, 3.71s/it] 41%|████▏ | 215/520 [13:59<18:50, 3.71s/it] {'loss': 1.1787, 'grad_norm': 0.0005761048447899787, 'learning_rate': 0.19865837370507108, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:59<18:50, 3.71s/it] 42%|████▏ | 216/520 [14:03<18:47, 3.71s/it] {'loss': 1.1361, 'grad_norm': 0.0006433417899910721, 'learning_rate': 0.19777299753775268, 'epoch': 0.42} + 42%|████▏ | 216/520 [14:03<18:47, 3.71s/it] 42%|████▏ | 217/520 [14:06<18:43, 3.71s/it] {'loss': 1.2801, 'grad_norm': 0.0006428511383134073, 'learning_rate': 0.19688576519184667, 'epoch': 
0.42} + 42%|████▏ | 217/520 [14:06<18:43, 3.71s/it] 42%|████▏ | 218/520 [14:10<18:35, 3.69s/it] {'loss': 1.2554, 'grad_norm': 0.0006564556389103534, 'learning_rate': 0.19599671114000014, 'epoch': 0.42} + 42%|████▏ | 218/520 [14:10<18:35, 3.69s/it] 42%|████▏ | 219/520 [14:14<18:31, 3.69s/it] {'loss': 1.2646, 'grad_norm': 0.0005596809171119324, 'learning_rate': 0.19510586992564094, 'epoch': 0.42} + 42%|████▏ | 219/520 [14:14<18:31, 3.69s/it] 42%|████▏ | 220/520 [14:17<18:26, 3.69s/it] {'loss': 1.2316, 'grad_norm': 0.0005701263574681053, 'learning_rate': 0.19421327616163564, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:17<18:26, 3.69s/it] 42%|████▎ | 221/520 [14:21<18:23, 3.69s/it] {'loss': 1.2575, 'grad_norm': 0.000599012947230993, 'learning_rate': 0.19331896452894448, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:21<18:23, 3.69s/it] 43%|████▎ | 222/520 [14:25<18:31, 3.73s/it] {'loss': 1.2011, 'grad_norm': 0.0005935985280795666, 'learning_rate': 0.19242296977527412, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:25<18:31, 3.73s/it] 43%|████▎ | 223/520 [14:29<18:41, 3.78s/it] {'loss': 1.1902, 'grad_norm': 0.0005966390224538371, 'learning_rate': 0.19152532671372738, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:29<18:41, 3.78s/it] 43%|████▎ | 224/520 [14:33<18:54, 3.83s/it] {'loss': 1.3537, 'grad_norm': 0.0006610791324076013, 'learning_rate': 0.19062607022145078, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:33<18:54, 3.83s/it] 43%|████▎ | 225/520 [14:37<19:00, 3.87s/it] {'loss': 1.1975, 'grad_norm': 0.0005761963402637915, 'learning_rate': 0.18972523523827908, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:37<19:00, 3.87s/it] 43%|████▎ | 226/520 [14:41<19:04, 3.89s/it] {'loss': 1.2979, 'grad_norm': 0.0006112228831886362, 'learning_rate': 0.1888228567653781, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:41<19:04, 3.89s/it] 44%|████▎ | 227/520 [14:45<18:59, 3.89s/it] {'loss': 1.2818, 'grad_norm': 0.0005642599114749979, 'learning_rate': 0.1879189698638846, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:45<18:59, 3.89s/it] 44%|████▍ | 228/520 [14:49<18:57, 3.89s/it] {'loss': 1.3501, 'grad_norm': 0.0006303446056113972, 'learning_rate': 0.18701360965354402, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:49<18:57, 3.89s/it] 44%|████▍ | 229/520 [14:52<18:53, 3.90s/it] {'loss': 1.261, 'grad_norm': 0.0005302273693362286, 'learning_rate': 0.18610681131134596, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:52<18:53, 3.90s/it] 44%|████▍ | 230/520 [14:56<18:52, 3.90s/it] {'loss': 1.1494, 'grad_norm': 0.0005881590888782069, 'learning_rate': 0.18519861007015728, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:56<18:52, 3.90s/it] 44%|████▍ | 231/520 [15:00<18:45, 3.90s/it] {'loss': 1.2111, 'grad_norm': 0.0006837971473178893, 'learning_rate': 0.18428904121735343, 'epoch': 0.44} + 44%|████▍ | 231/520 [15:00<18:45, 3.90s/it] 45%|████▍ | 232/520 [15:04<18:40, 3.89s/it] {'loss': 1.3887, 'grad_norm': 0.000647276463034255, 'learning_rate': 0.18337814009344713, 'epoch': 0.45} + 45%|████▍ | 232/520 [15:04<18:40, 3.89s/it] 45%|████▍ | 233/520 [15:08<18:33, 3.88s/it] {'loss': 1.2676, 'grad_norm': 0.0006272286237640553, 'learning_rate': 0.18246594209071543, 'epoch': 0.45} + 45%|████▍ | 233/520 [15:08<18:33, 3.88s/it] 45%|████▌ | 234/520 [15:12<18:29, 3.88s/it] {'loss': 1.1686, 'grad_norm': 0.000647174668962953, 'learning_rate': 0.18155248265182436, 'epoch': 0.45} + 45%|████▌ | 234/520 [15:12<18:29, 3.88s/it] 45%|████▌ | 235/520 [15:16<18:27, 3.89s/it] {'loss': 1.2125, 'grad_norm': 0.0005851411911456889, 'learning_rate': 0.18063779726845205, 'epoch': 0.45} + 45%|████▌ | 235/520 
[15:16<18:27, 3.89s/it] 45%|████▌ | 236/520 [15:20<18:21, 3.88s/it] {'loss': 1.3074, 'grad_norm': 0.0005652330060104832, 'learning_rate': 0.17972192147990965, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:20<18:21, 3.88s/it] 46%|████▌ | 237/520 [15:23<18:18, 3.88s/it] {'loss': 1.2815, 'grad_norm': 0.0005419633488992075, 'learning_rate': 0.17880489087176044, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:24<18:18, 3.88s/it] 46%|████▌ | 238/520 [15:27<18:12, 3.87s/it] {'loss': 1.2264, 'grad_norm': 0.00061887592961183, 'learning_rate': 0.17788674107443722, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:27<18:12, 3.87s/it] 46%|████▌ | 239/520 [15:31<18:05, 3.86s/it] {'loss': 1.3074, 'grad_norm': 0.0006069004668653047, 'learning_rate': 0.1769675077618579, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:31<18:05, 3.86s/it] 46%|████▌ | 240/520 [15:35<17:51, 3.83s/it] {'loss': 1.107, 'grad_norm': 0.0005128782006804626, 'learning_rate': 0.17604722665003958, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:35<17:51, 3.83s/it] 46%|████▋ | 241/520 [15:39<17:39, 3.80s/it] {'loss': 1.1843, 'grad_norm': 0.0005516718064098332, 'learning_rate': 0.17512593349571046, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:39<17:39, 3.80s/it] 47%|████▋ | 242/520 [15:42<17:27, 3.77s/it] {'loss': 1.2064, 'grad_norm': 0.0005547607581438364, 'learning_rate': 0.174203664094921, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:42<17:27, 3.77s/it] 47%|████▋ | 243/520 [15:46<17:20, 3.76s/it] {'loss': 1.2055, 'grad_norm': 0.0005759853219326282, 'learning_rate': 0.17328045428165273, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:46<17:20, 3.76s/it] 47%|████▋ | 244/520 [15:50<17:08, 3.73s/it] {'loss': 1.3183, 'grad_norm': 0.0005929834624171716, 'learning_rate': 0.17235633992642616, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:50<17:08, 3.73s/it] 47%|████▋ | 245/520 [15:53<17:05, 3.73s/it] {'loss': 1.1934, 'grad_norm': 0.000579773788216065, 'learning_rate': 0.171431356934907, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:53<17:05, 3.73s/it] 47%|████▋ | 246/520 [15:57<17:03, 3.73s/it] {'loss': 1.3675, 'grad_norm': 0.0006091722578188429, 'learning_rate': 0.17050554124651102, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:57<17:03, 3.73s/it] 48%|████▊ | 247/520 [16:01<16:54, 3.72s/it] {'loss': 1.3595, 'grad_norm': 0.0007220156249738729, 'learning_rate': 0.16957892883300776, 'epoch': 0.47} + 48%|████▊ | 247/520 [16:01<16:54, 3.72s/it] 48%|████▊ | 248/520 [16:05<16:51, 3.72s/it] {'loss': 1.186, 'grad_norm': 0.0006053422063228816, 'learning_rate': 0.16865155569712278, 'epoch': 0.48} + 48%|████▊ | 248/520 [16:05<16:51, 3.72s/it] 48%|████▊ | 249/520 [16:08<16:46, 3.72s/it] {'loss': 1.2855, 'grad_norm': 0.0006045109334583047, 'learning_rate': 0.16772345787113893, 'epoch': 0.48} + 48%|████▊ | 249/520 [16:08<16:46, 3.72s/it] 48%|████▊ | 250/520 [16:12<16:47, 3.73s/it] {'loss': 1.2314, 'grad_norm': 0.0006128170296560064, 'learning_rate': 0.16679467141549617, 'epoch': 0.48} + 48%|████▊ | 250/520 [16:12<16:47, 3.73s/it] 48%|████▊ | 251/520 [16:16<16:40, 3.72s/it] {'loss': 1.29, 'grad_norm': 0.0005405241179205557, 'learning_rate': 0.16586523241739068, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:16<16:40, 3.72s/it] 48%|████▊ | 252/520 [16:19<16:34, 3.71s/it] {'loss': 1.2614, 'grad_norm': 0.0005503079291174904, 'learning_rate': 0.16493517698937252, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:19<16:34, 3.71s/it] 49%|████▊ | 253/520 [16:23<16:33, 3.72s/it] {'loss': 1.2715, 'grad_norm': 0.0006146442008079823, 'learning_rate': 0.1640045412679426, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:23<16:33, 3.72s/it] 49%|████▉ | 
254/520 [16:27<16:29, 3.72s/it] {'loss': 1.2116, 'grad_norm': 0.0005257470656586243, 'learning_rate': 0.16307336141214876, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:27<16:29, 3.72s/it] 49%|████▉ | 255/520 [16:31<16:20, 3.70s/it] {'loss': 1.2189, 'grad_norm': 0.0005848157654531185, 'learning_rate': 0.16214167360218049, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:31<16:20, 3.70s/it] 49%|████▉ | 256/520 [16:34<16:18, 3.70s/it] {'loss': 1.2676, 'grad_norm': 0.0006656456797142713, 'learning_rate': 0.16120951403796366, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:34<16:18, 3.70s/it] 49%|████▉ | 257/520 [16:38<16:11, 3.70s/it] {'loss': 1.26, 'grad_norm': 0.0006160048863328119, 'learning_rate': 0.1602769189377535, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:38<16:11, 3.70s/it] 50%|████▉ | 258/520 [16:42<16:09, 3.70s/it] {'loss': 1.2657, 'grad_norm': 0.0004914253297308576, 'learning_rate': 0.15934392453672783, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:42<16:09, 3.70s/it] 50%|████▉ | 259/520 [16:45<16:05, 3.70s/it] {'loss': 1.3359, 'grad_norm': 0.0005953629293586975, 'learning_rate': 0.15841056708557877, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:45<16:05, 3.70s/it] 50%|█████ | 260/520 [16:49<16:07, 3.72s/it] {'loss': 1.3217, 'grad_norm': 0.0005659326597251885, 'learning_rate': 0.15747688284910458, 'epoch': 0.5} + 50%|█████ | 260/520 [16:49<16:07, 3.72s/it] 50%|█████ | 261/520 [16:53<16:04, 3.72s/it] {'loss': 1.2666, 'grad_norm': 0.0005514674952381218, 'learning_rate': 0.1565429081048004, 'epoch': 0.5} + 50%|█████ | 261/520 [16:53<16:04, 3.72s/it] 50%|█████ | 262/520 [16:57<16:06, 3.74s/it] {'loss': 1.196, 'grad_norm': 0.0005485380195344741, 'learning_rate': 0.15560867914144888, 'epoch': 0.5} + 50%|█████ | 262/520 [16:57<16:06, 3.74s/it] 51%|█████ | 263/520 [17:00<15:58, 3.73s/it] {'loss': 1.2765, 'grad_norm': 0.0005520677653633453, 'learning_rate': 0.15467423225770996, 'epoch': 0.51} + 51%|█████ | 263/520 [17:00<15:58, 3.73s/it] 51%|█████ | 264/520 [17:04<15:54, 3.73s/it] {'loss': 1.2961, 'grad_norm': 0.0005239743459210808, 'learning_rate': 0.15373960376071094, 'epoch': 0.51} + 51%|█████ | 264/520 [17:04<15:54, 3.73s/it] 51%|█████ | 265/520 [17:08<15:50, 3.73s/it] {'loss': 1.2077, 'grad_norm': 0.000606279168819177, 'learning_rate': 0.15280482996463532, 'epoch': 0.51} + 51%|█████ | 265/520 [17:08<15:50, 3.73s/it] 51%|█████ | 266/520 [17:12<15:50, 3.74s/it] {'loss': 1.0804, 'grad_norm': 0.00045299338655386287, 'learning_rate': 0.15186994718931227, 'epoch': 0.51} + 51%|█████ | 266/520 [17:12<15:50, 3.74s/it] 51%|█████▏ | 267/520 [17:16<16:00, 3.80s/it] {'loss': 1.2085, 'grad_norm': 0.0005072669972576723, 'learning_rate': 0.15093499175880504, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:16<16:00, 3.80s/it] 52%|█████▏ | 268/520 [17:19<16:03, 3.83s/it] {'loss': 1.3974, 'grad_norm': 0.0005882580877875384, 'learning_rate': 0.15, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:19<16:03, 3.83s/it] 52%|█████▏ | 269/520 [17:23<16:07, 3.85s/it] {'loss': 1.3003, 'grad_norm': 0.0005522752974357662, 'learning_rate': 0.14906500824119495, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:23<16:07, 3.85s/it] 52%|█████▏ | 270/520 [17:27<16:08, 3.88s/it] {'loss': 1.2144, 'grad_norm': 0.0005777299044177762, 'learning_rate': 0.14813005281068775, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:27<16:08, 3.88s/it] 52%|█████▏ | 271/520 [17:31<16:06, 3.88s/it] {'loss': 1.2891, 'grad_norm': 0.0005733020872177552, 'learning_rate': 0.1471951700353647, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:31<16:06, 3.88s/it] 52%|█████▏ | 272/520 [17:35<16:05, 3.89s/it] {'loss': 
1.2249, 'grad_norm': 0.0005342167823695485, 'learning_rate': 0.14626039623928908, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:35<16:05, 3.89s/it] 52%|█████▎ | 273/520 [17:39<16:01, 3.89s/it] {'loss': 1.3472, 'grad_norm': 0.0009031012543184462, 'learning_rate': 0.14532576774229006, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:39<16:01, 3.89s/it] 53%|█████▎ | 274/520 [17:43<15:57, 3.89s/it] {'loss': 1.261, 'grad_norm': 0.000571945407902762, 'learning_rate': 0.14439132085855116, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:43<15:57, 3.89s/it] 53%|█████▎ | 275/520 [17:47<15:41, 3.84s/it] {'loss': 1.2058, 'grad_norm': 0.0005634040184592522, 'learning_rate': 0.14345709189519962, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:47<15:41, 3.84s/it] 53%|█████▎ | 276/520 [17:50<15:26, 3.80s/it] {'loss': 1.271, 'grad_norm': 0.0005684841006336768, 'learning_rate': 0.1425231171508954, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:50<15:26, 3.80s/it] 53%|█████▎ | 277/520 [17:54<15:28, 3.82s/it] {'loss': 1.3469, 'grad_norm': 0.0005131126331563038, 'learning_rate': 0.14158943291442122, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:54<15:28, 3.82s/it] 53%|█████▎ | 278/520 [17:58<15:30, 3.85s/it] {'loss': 1.1627, 'grad_norm': 0.0004921792131244052, 'learning_rate': 0.1406560754632722, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:58<15:30, 3.85s/it] 54%|█████▎ | 279/520 [18:02<15:30, 3.86s/it] {'loss': 1.2287, 'grad_norm': 0.0005977100545879355, 'learning_rate': 0.13972308106224648, 'epoch': 0.54} + 54%|█████▎ | 279/520 [18:02<15:30, 3.86s/it] 54%|█████▍ | 280/520 [18:06<15:30, 3.88s/it] {'loss': 1.2079, 'grad_norm': 0.0006159334455087636, 'learning_rate': 0.13879048596203636, 'epoch': 0.54} + 54%|█████▍ | 280/520 [18:06<15:30, 3.88s/it] 54%|█████▍ | 281/520 [18:10<15:27, 3.88s/it] {'loss': 1.3067, 'grad_norm': 0.0006029155166695183, 'learning_rate': 0.1378583263978195, 'epoch': 0.54} + 54%|█████▍ | 281/520 [18:10<15:27, 3.88s/it] 54%|█████▍ | 282/520 [18:14<15:24, 3.89s/it] {'loss': 1.1823, 'grad_norm': 0.0005202987307507895, 'learning_rate': 0.13692663858785126, 'epoch': 0.54} + 54%|█████▍ | 282/520 [18:14<15:24, 3.89s/it] 54%|█████▍ | 283/520 [18:18<15:23, 3.90s/it] {'loss': 1.3295, 'grad_norm': 0.0005879158589588095, 'learning_rate': 0.1359954587320574, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:18<15:23, 3.90s/it] 55%|█████▍ | 284/520 [18:21<15:15, 3.88s/it] {'loss': 1.2269, 'grad_norm': 0.0006090385169021105, 'learning_rate': 0.13506482301062753, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:21<15:15, 3.88s/it] 55%|█████▍ | 285/520 [18:25<15:02, 3.84s/it] {'loss': 1.2027, 'grad_norm': 0.0005588838098590679, 'learning_rate': 0.13413476758260934, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:25<15:02, 3.84s/it] 55%|█████▌ | 286/520 [18:29<14:50, 3.81s/it] {'loss': 1.0759, 'grad_norm': 0.0005213397326469844, 'learning_rate': 0.13320532858450382, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:29<14:50, 3.81s/it] 55%|█████▌ | 287/520 [18:33<14:42, 3.79s/it] {'loss': 1.3236, 'grad_norm': 0.000533033422764718, 'learning_rate': 0.13227654212886109, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:33<14:42, 3.79s/it] 55%|█████▌ | 288/520 [18:36<14:32, 3.76s/it] {'loss': 1.3503, 'grad_norm': 0.0005184730952271666, 'learning_rate': 0.13134844430287726, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:36<14:32, 3.76s/it] 56%|█████▌ | 289/520 [18:40<14:26, 3.75s/it] {'loss': 1.2132, 'grad_norm': 0.0005282948774781361, 'learning_rate': 0.13042107116699228, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:40<14:26, 3.75s/it] 56%|█████▌ | 290/520 [18:44<14:20, 3.74s/it] 
{'loss': 1.1439, 'grad_norm': 0.00048083891015578465, 'learning_rate': 0.129494458753489, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:44<14:20, 3.74s/it] 56%|█████▌ | 291/520 [18:48<14:19, 3.75s/it] {'loss': 1.1825, 'grad_norm': 0.000536400756006179, 'learning_rate': 0.12856864306509302, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:48<14:19, 3.75s/it] 56%|█████▌ | 292/520 [18:51<14:10, 3.73s/it] {'loss': 1.2389, 'grad_norm': 0.0005085722284335702, 'learning_rate': 0.1276436600735738, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:51<14:10, 3.73s/it] 56%|█████▋ | 293/520 [18:55<14:09, 3.74s/it] {'loss': 1.1881, 'grad_norm': 0.0005421562193635492, 'learning_rate': 0.12671954571834726, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:55<14:09, 3.74s/it] 57%|█████▋ | 294/520 [18:59<14:06, 3.75s/it] {'loss': 1.2084, 'grad_norm': 0.0005483185781133283, 'learning_rate': 0.125796335905079, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:59<14:06, 3.75s/it] 57%|█████▋ | 295/520 [19:03<14:14, 3.80s/it] {'loss': 1.2857, 'grad_norm': 0.0005305677384387585, 'learning_rate': 0.12487406650428955, 'epoch': 0.57} + 57%|█████▋ | 295/520 [19:03<14:14, 3.80s/it] 57%|█████▋ | 296/520 [19:07<14:21, 3.85s/it] {'loss': 1.1636, 'grad_norm': 0.0005335469672020539, 'learning_rate': 0.12395277334996045, 'epoch': 0.57} + 57%|█████▋ | 296/520 [19:07<14:21, 3.85s/it] 57%|█████▋ | 297/520 [19:11<14:25, 3.88s/it] {'loss': 1.2962, 'grad_norm': 0.0006167983113005675, 'learning_rate': 0.1230324922381421, 'epoch': 0.57} + 57%|█████▋ | 297/520 [19:11<14:25, 3.88s/it] 57%|█████▋ | 298/520 [19:15<14:22, 3.88s/it] {'loss': 1.254, 'grad_norm': 0.000488457107389671, 'learning_rate': 0.12211325892556281, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:15<14:22, 3.88s/it] 57%|█████▊ | 299/520 [19:18<14:11, 3.85s/it] {'loss': 1.3071, 'grad_norm': 0.000527586453406657, 'learning_rate': 0.12119510912823958, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:18<14:11, 3.85s/it] 58%|█████▊ | 300/520 [19:22<13:57, 3.81s/it] {'loss': 1.31, 'grad_norm': 0.0005489657438454039, 'learning_rate': 0.12027807852009038, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:22<13:57, 3.81s/it] 58%|█████▊ | 301/520 [19:26<13:47, 3.78s/it] {'loss': 1.2934, 'grad_norm': 0.0005296492847932599, 'learning_rate': 0.11936220273154796, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:26<13:47, 3.78s/it] 58%|█████▊ | 302/520 [19:29<13:40, 3.76s/it] {'loss': 1.3209, 'grad_norm': 0.0005293096118165092, 'learning_rate': 0.11844751734817566, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:29<13:40, 3.76s/it] 58%|█████▊ | 303/520 [19:33<13:33, 3.75s/it] {'loss': 1.2117, 'grad_norm': 0.0006187605783999152, 'learning_rate': 0.11753405790928456, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:33<13:33, 3.75s/it] 58%|█████▊ | 304/520 [19:37<13:28, 3.74s/it] {'loss': 1.223, 'grad_norm': 0.00056785320472747, 'learning_rate': 0.11662185990655284, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:37<13:28, 3.74s/it] 59%|█████▊ | 305/520 [19:41<13:20, 3.73s/it] {'loss': 1.3318, 'grad_norm': 0.0006762057277525465, 'learning_rate': 0.11571095878264659, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:41<13:20, 3.73s/it] 59%|█████▉ | 306/520 [19:44<13:22, 3.75s/it] {'loss': 1.265, 'grad_norm': 0.0005471700692546701, 'learning_rate': 0.11480138992984275, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:44<13:22, 3.75s/it] 59%|█████▉ | 307/520 [19:49<13:43, 3.86s/it] {'loss': 1.2066, 'grad_norm': 0.0005486594911827775, 'learning_rate': 0.11389318868865408, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:49<13:43, 3.86s/it] 59%|█████▉ | 308/520 [19:52<13:30, 3.82s/it] 
{'loss': 1.3215, 'grad_norm': 0.0005244576601111262, 'learning_rate': 0.11298639034645594, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:52<13:30, 3.82s/it] 59%|█████▉ | 309/520 [19:56<13:22, 3.80s/it] {'loss': 1.208, 'grad_norm': 0.0005115234418049034, 'learning_rate': 0.11208103013611535, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:56<13:22, 3.80s/it] 60%|█████▉ | 310/520 [20:00<13:09, 3.76s/it] {'loss': 1.1833, 'grad_norm': 0.0005295080248987385, 'learning_rate': 0.11117714323462187, 'epoch': 0.6} + 60%|█████▉ | 310/520 [20:00<13:09, 3.76s/it] 60%|█████▉ | 311/520 [20:03<13:03, 3.75s/it] {'loss': 1.1659, 'grad_norm': 0.0005939743152311461, 'learning_rate': 0.1102747647617209, 'epoch': 0.6} + 60%|█████▉ | 311/520 [20:03<13:03, 3.75s/it] 60%|██████ | 312/520 [20:07<12:56, 3.73s/it] {'loss': 1.1566, 'grad_norm': 0.0005453714789422513, 'learning_rate': 0.10937392977854925, 'epoch': 0.6} + 60%|██████ | 312/520 [20:07<12:56, 3.73s/it] 60%|██████ | 313/520 [20:11<12:50, 3.72s/it] {'loss': 1.1355, 'grad_norm': 0.0004904045709418357, 'learning_rate': 0.1084746732862726, 'epoch': 0.6} + 60%|██████ | 313/520 [20:11<12:50, 3.72s/it] 60%|██████ | 314/520 [20:15<13:14, 3.86s/it] {'loss': 1.1775, 'grad_norm': 0.0005375990196781214, 'learning_rate': 0.10757703022472588, 'epoch': 0.6} + 60%|██████ | 314/520 [20:15<13:14, 3.86s/it] 61%|██████ | 315/520 [20:19<13:00, 3.81s/it] {'loss': 1.2918, 'grad_norm': 0.0006139396582316568, 'learning_rate': 0.10668103547105554, 'epoch': 0.61} + 61%|██████ | 315/520 [20:19<13:00, 3.81s/it] 61%|██████ | 316/520 [20:23<13:15, 3.90s/it] {'loss': 1.1573, 'grad_norm': 0.0005245637129005359, 'learning_rate': 0.10578672383836436, 'epoch': 0.61} + 61%|██████ | 316/520 [20:23<13:15, 3.90s/it] 61%|██████ | 317/520 [20:26<13:00, 3.85s/it] {'loss': 1.168, 'grad_norm': 0.0004870081034236216, 'learning_rate': 0.10489413007435905, 'epoch': 0.61} + 61%|██████ | 317/520 [20:26<13:00, 3.85s/it] 61%|██████ | 318/520 [20:30<12:49, 3.81s/it] {'loss': 1.2832, 'grad_norm': 0.0006718503827367988, 'learning_rate': 0.10400328885999988, 'epoch': 0.61} + 61%|██████ | 318/520 [20:30<12:49, 3.81s/it] 61%|██████▏ | 319/520 [20:34<13:02, 3.89s/it] {'loss': 1.1637, 'grad_norm': 0.000666742942078955, 'learning_rate': 0.10311423480815335, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:34<13:02, 3.89s/it] 62%|██████▏ | 320/520 [20:38<12:48, 3.84s/it] {'loss': 1.1051, 'grad_norm': 0.0005074504072954233, 'learning_rate': 0.10222700246224735, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:38<12:48, 3.84s/it] 62%|██████▏ | 321/520 [20:42<13:01, 3.93s/it] {'loss': 1.3012, 'grad_norm': 0.0005186956960335234, 'learning_rate': 0.10134162629492895, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:42<13:01, 3.93s/it] 62%|██████▏ | 322/520 [20:46<12:49, 3.89s/it] {'loss': 1.1712, 'grad_norm': 0.0005289306771015237, 'learning_rate': 0.10045814070672499, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:46<12:49, 3.89s/it] 62%|██████▏ | 323/520 [20:50<12:33, 3.82s/it] {'loss': 1.2393, 'grad_norm': 0.0007043105047837659, 'learning_rate': 0.09957658002470542, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:50<12:33, 3.82s/it] 62%|██████▏ | 324/520 [20:53<12:24, 3.80s/it] {'loss': 1.2494, 'grad_norm': 0.0005269521102077257, 'learning_rate': 0.0986969785011497, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:53<12:24, 3.80s/it] 62%|██████▎ | 325/520 [20:57<12:18, 3.79s/it] {'loss': 1.2422, 'grad_norm': 0.0005951301571830869, 'learning_rate': 0.0978193703122159, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:57<12:18, 3.79s/it] 63%|██████▎ | 326/520 
[21:01<12:11, 3.77s/it] {'loss': 1.2332, 'grad_norm': 0.0005408408015067307, 'learning_rate': 0.09694378955661279, 'epoch': 0.63} + 63%|██████▎ | 326/520 [21:01<12:11, 3.77s/it] 63%|██████▎ | 327/520 [21:05<12:15, 3.81s/it] {'loss': 1.2946, 'grad_norm': 0.0006040535107744257, 'learning_rate': 0.09607027025427486, 'epoch': 0.63} + 63%|██████▎ | 327/520 [21:05<12:15, 3.81s/it] 63%|██████▎ | 328/520 [21:09<12:19, 3.85s/it] {'loss': 1.296, 'grad_norm': 0.0005498418173353113, 'learning_rate': 0.09519884634504074, 'epoch': 0.63} + 63%|██████▎ | 328/520 [21:09<12:19, 3.85s/it] 63%|██████▎ | 329/520 [21:13<12:16, 3.86s/it] {'loss': 1.1596, 'grad_norm': 0.0004487082961918589, 'learning_rate': 0.09432955168733431, 'epoch': 0.63} + 63%|██████▎ | 329/520 [21:13<12:16, 3.86s/it] 63%|██████▎ | 330/520 [21:16<12:15, 3.87s/it] {'loss': 1.2413, 'grad_norm': 0.0004934437214711355, 'learning_rate': 0.09346242005684921, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:16<12:15, 3.87s/it] 64%|██████▎ | 331/520 [21:20<12:12, 3.88s/it] {'loss': 1.1912, 'grad_norm': 0.0005276848858698651, 'learning_rate': 0.09259748514523654, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:20<12:12, 3.88s/it] 64%|██████▍ | 332/520 [21:24<12:12, 3.90s/it] {'loss': 1.3117, 'grad_norm': 0.0005430861530754564, 'learning_rate': 0.09173478055879579, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:24<12:12, 3.90s/it] 64%|██████▍ | 333/520 [21:28<11:57, 3.84s/it] {'loss': 1.3447, 'grad_norm': 0.0005712467675224572, 'learning_rate': 0.09087433981716911, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:28<11:57, 3.84s/it] 64%|██████▍ | 334/520 [21:32<11:48, 3.81s/it] {'loss': 1.2485, 'grad_norm': 0.0005474462087378796, 'learning_rate': 0.09001619635203888, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:32<11:48, 3.81s/it] 64%|██████▍ | 335/520 [21:36<11:47, 3.83s/it] {'loss': 1.2508, 'grad_norm': 0.0005219627412200773, 'learning_rate': 0.08916038350582876, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:36<11:47, 3.83s/it] 65%|██████▍ | 336/520 [21:39<11:48, 3.85s/it] {'loss': 1.1366, 'grad_norm': 0.0005679358834734478, 'learning_rate': 0.08830693453040829, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:39<11:48, 3.85s/it] 65%|██████▍ | 337/520 [21:43<11:49, 3.88s/it] {'loss': 1.1302, 'grad_norm': 0.0005165500548119594, 'learning_rate': 0.08745588258580084, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:43<11:49, 3.88s/it] 65%|██████▌ | 338/520 [21:47<11:46, 3.88s/it] {'loss': 1.2534, 'grad_norm': 0.0005361277909372626, 'learning_rate': 0.0866072607388951, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:47<11:46, 3.88s/it] 65%|██████▌ | 339/520 [21:51<11:43, 3.89s/it] {'loss': 1.2103, 'grad_norm': 0.0005428376592542128, 'learning_rate': 0.08576110196216057, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:51<11:43, 3.89s/it] 65%|██████▌ | 340/520 [21:55<11:40, 3.89s/it] {'loss': 1.1869, 'grad_norm': 0.0005676803649739912, 'learning_rate': 0.08491743913236628, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:55<11:40, 3.89s/it] 66%|██████▌ | 341/520 [21:59<11:36, 3.89s/it] {'loss': 1.2038, 'grad_norm': 0.0005557082799305471, 'learning_rate': 0.08407630502930323, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:59<11:36, 3.89s/it] 66%|██████▌ | 342/520 [22:03<11:33, 3.89s/it] {'loss': 1.2845, 'grad_norm': 0.0006251465552947857, 'learning_rate': 0.08323773233451114, 'epoch': 0.66} + 66%|██████▌ | 342/520 [22:03<11:33, 3.89s/it] 66%|██████▌ | 343/520 [22:07<11:30, 3.90s/it] {'loss': 1.249, 'grad_norm': 0.00047680875622140543, 'learning_rate': 0.08240175363000819, 'epoch': 0.66} + 66%|██████▌ | 343/520 
[22:07<11:30, 3.90s/it] 66%|██████▌ | 344/520 [22:11<11:27, 3.91s/it] {'loss': 1.1609, 'grad_norm': 0.00047492760251779183, 'learning_rate': 0.08156840139702555, 'epoch': 0.66} + 66%|██████▌ | 344/520 [22:11<11:27, 3.91s/it] 66%|██████▋ | 345/520 [22:15<11:25, 3.92s/it] {'loss': 1.277, 'grad_norm': 0.0005533160945086153, 'learning_rate': 0.08073770801474495, 'epoch': 0.66} + 66%|██████▋ | 345/520 [22:15<11:25, 3.92s/it] 67%|██████▋ | 346/520 [22:19<11:21, 3.92s/it] {'loss': 1.2419, 'grad_norm': 0.0004993938452676766, 'learning_rate': 0.07990970575904069, 'epoch': 0.67} + 67%|██████▋ | 346/520 [22:19<11:21, 3.92s/it] 67%|██████▋ | 347/520 [22:23<11:18, 3.92s/it] {'loss': 1.1901, 'grad_norm': 0.0004930129953511486, 'learning_rate': 0.07908442680122597, 'epoch': 0.67} + 67%|██████▋ | 347/520 [22:23<11:18, 3.92s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:26<11:15, 3.93s/it] {'loss': 1.1533, 'grad_norm': 0.0005965382348309292, 'learning_rate': 0.0782619032068023, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:26<11:15, 3.93s/it] 67%|██████▋ | 349/520 [22:30<11:10, 3.92s/it] {'loss': 1.1861, 'grad_norm': 0.0005892112470159636, 'learning_rate': 0.07744216693421403, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:30<11:10, 3.92s/it] 67%|██████▋ | 350/520 [22:34<11:10, 3.94s/it] {'loss': 1.226, 'grad_norm': 0.0005402868530220039, 'learning_rate': 0.07662524983360665, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:34<11:10, 3.94s/it] 68%|██████▊ | 351/520 [22:38<11:05, 3.94s/it] {'loss': 1.1364, 'grad_norm': 0.0005121916957033159, 'learning_rate': 0.07581118364558888, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:38<11:05, 3.94s/it] 68%|██████▊ | 352/520 [22:42<10:59, 3.93s/it] {'loss': 1.2515, 'grad_norm': 0.0005083656900122223, 'learning_rate': 0.07500000000000002, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:42<10:59, 3.93s/it] 68%|██████▊ | 353/520 [22:46<10:56, 3.93s/it] {'loss': 1.2127, 'grad_norm': 0.0005175282345214107, 'learning_rate': 0.07419173041468043, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:46<10:56, 3.93s/it] 68%|██████▊ | 354/520 [22:50<10:50, 3.92s/it] {'loss': 1.3271, 'grad_norm': 0.0005053869080537125, 'learning_rate': 0.0733864062942472, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:50<10:50, 3.92s/it] 68%|██████▊ | 355/520 [22:54<10:47, 3.92s/it] {'loss': 1.1935, 'grad_norm': 0.0005117863490095554, 'learning_rate': 0.07258405892887398, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:54<10:47, 3.92s/it] 68%|██████▊ | 356/520 [22:58<10:44, 3.93s/it] {'loss': 1.1903, 'grad_norm': 0.0005553934832701858, 'learning_rate': 0.0717847194930753, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:58<10:44, 3.93s/it] 69%|██████▊ | 357/520 [23:02<10:40, 3.93s/it] {'loss': 1.2196, 'grad_norm': 0.0004914489740749421, 'learning_rate': 0.07098841904449488, 'epoch': 0.69} + 69%|██████▊ | 357/520 [23:02<10:40, 3.93s/it] 69%|██████▉ | 358/520 [23:06<10:40, 3.95s/it] {'loss': 1.1576, 'grad_norm': 0.0005214147604238106, 'learning_rate': 0.07019518852269953, 'epoch': 0.69} + 69%|██████▉ | 358/520 [23:06<10:40, 3.95s/it] 69%|██████▉ | 359/520 [23:10<10:28, 3.90s/it] {'loss': 1.2628, 'grad_norm': 0.0006007220400027985, 'learning_rate': 0.0694050587479764, 'epoch': 0.69} + 69%|██████▉ | 359/520 [23:10<10:28, 3.90s/it] 69%|██████▉ | 360/520 [23:13<10:15, 3.85s/it] {'loss': 1.2843, 'grad_norm': 0.0005772194652102714, 'learning_rate': 0.0686180604201361, 'epoch': 0.69} 
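The "Token indices sequence length is longer than the specified maximum sequence length" warning above (2778 > 2048) comes from the tokenizer, not the model: with --lazy_preprocess True samples are tokenized on the fly during training, and encoding an overlong conversation without truncation produces more ids than the configured --model_max_length of 2048. A minimal sketch of how the warning arises, using only the public transformers tokenizer API (long_text is a hypothetical stand-in for an overlong sample; the actual TinyLLaVA preprocessing may truncate elsewhere):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", use_fast=False)  # matches --tokenizer_use_fast False
tok.model_max_length = 2048                 # matches --model_max_length 2048

long_text = "word " * 3000                  # hypothetical overlong sample
ids = tok(long_text).input_ids              # len(ids) > 2048, so the warning above is emitted
clipped = tok(long_text, truncation=True).input_ids
assert len(clipped) <= 2048                 # explicit truncation avoids the warning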
+ 69%|██████▉ | 360/520 [23:13<10:15, 3.85s/it] 69%|██████▉ | 361/520 [23:17<10:05, 3.81s/it] {'loss': 1.2779, 'grad_norm': 0.0004883784011165272, 'learning_rate': 0.06783422411731932, 'epoch': 0.69} + 69%|██████▉ | 361/520 [23:17<10:05, 3.81s/it] 70%|██████▉ | 362/520 [23:21<09:54, 3.77s/it] {'loss': 1.204, 'grad_norm': 0.0005678331435340552, 'learning_rate': 0.06705358029480908, 'epoch': 0.7} + 70%|██████▉ | 362/520 [23:21<09:54, 3.77s/it] 70%|██████▉ | 363/520 [23:24<09:50, 3.76s/it] {'loss': 1.2537, 'grad_norm': 0.0005290628093331339, 'learning_rate': 0.06627615928384743, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:24<09:50, 3.76s/it] 70%|███████ | 364/520 [23:28<09:48, 3.77s/it] {'loss': 1.2969, 'grad_norm': 0.0005627741612040187, 'learning_rate': 0.06550199129045668, 'epoch': 0.7} + 70%|███████ | 364/520 [23:28<09:48, 3.77s/it] 70%|███████ | 365/520 [23:32<09:48, 3.79s/it] {'loss': 1.301, 'grad_norm': 0.0005375539799093782, 'learning_rate': 0.06473110639426617, 'epoch': 0.7} + 70%|███████ | 365/520 [23:32<09:48, 3.79s/it] 70%|███████ | 366/520 [23:36<09:43, 3.79s/it] {'loss': 1.2555, 'grad_norm': 0.0005020202411524711, 'learning_rate': 0.06396353454734312, 'epoch': 0.7} + 70%|███████ | 366/520 [23:36<09:43, 3.79s/it] 71%|███████ | 367/520 [23:40<09:35, 3.76s/it] {'loss': 1.2551, 'grad_norm': 0.0005420259155511596, 'learning_rate': 0.06319930557302914, 'epoch': 0.71} + 71%|███████ | 367/520 [23:40<09:35, 3.76s/it] 71%|███████ | 368/520 [23:43<09:31, 3.76s/it] {'loss': 1.1162, 'grad_norm': 0.0006040911721377629, 'learning_rate': 0.062438449164781556, 'epoch': 0.71} + 71%|███████ | 368/520 [23:43<09:31, 3.76s/it] 71%|███████ | 369/520 [23:47<09:36, 3.82s/it] {'loss': 1.2592, 'grad_norm': 0.0004934279336539233, 'learning_rate': 0.0616809948850193, 'epoch': 0.71} + 71%|███████ | 369/520 [23:47<09:36, 3.82s/it] 71%|███████ | 370/520 [23:51<09:38, 3.86s/it] {'loss': 1.1714, 'grad_norm': 0.0004937150845752156, 'learning_rate': 0.060926972163974774, 'epoch': 0.71} + 71%|███████ | 370/520 [23:51<09:38, 3.86s/it] 71%|███████▏ | 371/520 [23:55<09:38, 3.88s/it] {'loss': 1.1702, 'grad_norm': 0.0005693598219227741, 'learning_rate': 0.060176410298549955, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:55<09:38, 3.88s/it] 72%|███████▏ | 372/520 [23:59<09:25, 3.82s/it] {'loss': 1.3456, 'grad_norm': 0.0005201625865705451, 'learning_rate': 0.05942933845117836, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:59<09:25, 3.82s/it] 72%|███████▏ | 373/520 [24:03<09:19, 3.81s/it] {'loss': 1.2302, 'grad_norm': 0.0006250755830461387, 'learning_rate': 0.058685785648691896, 'epoch': 0.72} + 72%|███████▏ | 373/520 [24:03<09:19, 3.81s/it] 72%|███████▏ | 374/520 [24:06<09:11, 3.78s/it] {'loss': 1.2562, 'grad_norm': 0.0005557901074819345, 'learning_rate': 0.05794578078119291, 'epoch': 0.72} + 72%|███████▏ | 374/520 [24:06<09:11, 3.78s/it] 72%|███████▏ | 375/520 [24:10<09:07, 3.77s/it] {'loss': 1.1565, 'grad_norm': 0.0005006223750711049, 'learning_rate': 0.05720935260093177, 'epoch': 0.72} + 72%|███████▏ | 375/520 [24:10<09:07, 3.77s/it] 72%|███████▏ | 376/520 [24:14<09:04, 3.78s/it] {'loss': 1.2775, 'grad_norm': 0.0004826352142997974, 'learning_rate': 0.05647652972118997, 'epoch': 0.72} + 72%|███████▏ | 376/520 [24:14<09:04, 3.78s/it] 72%|███████▎ | 377/520 [24:18<08:59, 3.77s/it] {'loss': 1.2271, 'grad_norm': 0.000610558845127567, 'learning_rate': 0.0557473406151679, 'epoch': 0.72} + 72%|███████▎ | 377/520 [24:18<08:59, 3.77s/it] 73%|███████▎ | 378/520 [24:21<08:55, 3.77s/it] {'loss': 1.2802, 'grad_norm': 
0.0005133667761629304, 'learning_rate': 0.055021813614879046, 'epoch': 0.73} + 73%|███████▎ | 378/520 [24:21<08:55, 3.77s/it] 73%|███████▎ | 379/520 [24:25<08:58, 3.82s/it] {'loss': 1.2471, 'grad_norm': 0.0005282692995603155, 'learning_rate': 0.05429997691004873, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:25<08:58, 3.82s/it] 73%|███████▎ | 380/520 [24:29<08:57, 3.84s/it] {'loss': 1.3189, 'grad_norm': 0.0005853293863563649, 'learning_rate': 0.05358185854701909, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:29<08:57, 3.84s/it] 73%|███████▎ | 381/520 [24:33<08:49, 3.81s/it] {'loss': 1.2555, 'grad_norm': 0.0005297232540686154, 'learning_rate': 0.052867486427659455, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:33<08:49, 3.81s/it] 73%|███████▎ | 382/520 [24:37<08:44, 3.80s/it] {'loss': 1.2734, 'grad_norm': 0.0005034106828354277, 'learning_rate': 0.05215688830828187, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:37<08:44, 3.80s/it] 74%|███████▎ | 383/520 [24:40<08:35, 3.77s/it] {'loss': 1.0951, 'grad_norm': 0.0006252069903549342, 'learning_rate': 0.05145009179856295, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:40<08:35, 3.77s/it] 74%|███████▍ | 384/520 [24:44<08:31, 3.76s/it] {'loss': 1.3437, 'grad_norm': 0.0005117415134195137, 'learning_rate': 0.05074712436047112, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:44<08:31, 3.76s/it] 74%|███████▍ | 385/520 [24:48<08:25, 3.74s/it] {'loss': 1.235, 'grad_norm': 0.0005075239126270717, 'learning_rate': 0.050048013307199414, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:48<08:25, 3.74s/it] 74%|███████▍ | 386/520 [24:52<08:18, 3.72s/it] {'loss': 1.1794, 'grad_norm': 0.000459884785373176, 'learning_rate': 0.04935278580210451, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:52<08:18, 3.72s/it] 74%|███████▍ | 387/520 [24:55<08:15, 3.73s/it] {'loss': 1.3439, 'grad_norm': 0.0006315553768543826, 'learning_rate': 0.048661468857650964, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:55<08:15, 3.73s/it] 75%|███████▍ | 388/520 [24:59<08:13, 3.74s/it] {'loss': 1.1365, 'grad_norm': 0.0004913917279349577, 'learning_rate': 0.04797408933436206, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:59<08:13, 3.74s/it] 75%|███████▍ | 389/520 [25:03<08:09, 3.73s/it] {'loss': 1.1868, 'grad_norm': 0.0005995180694904599, 'learning_rate': 0.04729067393977597, 'epoch': 0.75} + 75%|███████▍ | 389/520 [25:03<08:09, 3.73s/it] 75%|███████▌ | 390/520 [25:07<08:03, 3.72s/it] {'loss': 1.2586, 'grad_norm': 0.000586771429615122, 'learning_rate': 0.04661124922740795, 'epoch': 0.75} + 75%|███████▌ | 390/520 [25:07<08:03, 3.72s/it] 75%|███████▌ | 391/520 [25:10<07:58, 3.71s/it] {'loss': 1.3337, 'grad_norm': 0.0005568277154499975, 'learning_rate': 0.04593584159571875, 'epoch': 0.75} + 75%|███████▌ | 391/520 [25:10<07:58, 3.71s/it] 75%|███████▌ | 392/520 [25:14<07:56, 3.73s/it] {'loss': 1.152, 'grad_norm': 0.000538507686061345, 'learning_rate': 0.045264477287089086, 'epoch': 0.75} + 75%|███████▌ | 392/520 [25:14<07:56, 3.73s/it] 76%|███████▌ | 393/520 [25:18<07:51, 3.71s/it] {'loss': 1.1747, 'grad_norm': 0.0004860451098606228, 'learning_rate': 0.044597182386799626, 'epoch': 0.76} + 76%|███████▌ | 393/520 [25:18<07:51, 3.71s/it] 76%|███████▌ | 394/520 [25:21<07:50, 3.73s/it] {'loss': 1.2113, 'grad_norm': 0.0005668689006899245, 'learning_rate': 0.04393398282201788, 'epoch': 0.76} + 76%|███████▌ | 394/520 [25:21<07:50, 3.73s/it] 76%|███████▌ | 395/520 [25:25<07:48, 3.75s/it] {'loss': 1.1793, 'grad_norm': 0.0005339279545733635, 'learning_rate': 0.04327490436079051, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:25<07:48, 
3.75s/it] 76%|███████▌ | 396/520 [25:29<07:44, 3.75s/it] {'loss': 1.2562, 'grad_norm': 0.0005567227050333032, 'learning_rate': 0.04261997261104223, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:29<07:44, 3.75s/it] 76%|███████▋ | 397/520 [25:33<07:38, 3.73s/it] {'loss': 1.2299, 'grad_norm': 0.0005459849943525577, 'learning_rate': 0.04196921301958104, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:33<07:38, 3.73s/it] 77%|███████▋ | 398/520 [25:36<07:33, 3.72s/it] {'loss': 1.2257, 'grad_norm': 0.0005364339890701468, 'learning_rate': 0.0413226508711091, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:36<07:33, 3.72s/it] 77%|███████▋ | 399/520 [25:40<07:30, 3.72s/it] {'loss': 1.2158, 'grad_norm': 0.0005519167803077069, 'learning_rate': 0.04068031128724075, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:40<07:30, 3.72s/it] 77%|███████▋ | 400/520 [25:44<07:31, 3.76s/it] {'loss': 1.2549, 'grad_norm': 0.0005352509395980298, 'learning_rate': 0.04004221922552608, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:44<07:31, 3.76s/it] 77%|███████▋ | 401/520 [25:48<07:27, 3.76s/it] {'loss': 1.0609, 'grad_norm': 0.0005577461354313177, 'learning_rate': 0.039408399478481404, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:48<07:27, 3.76s/it] 77%|███████▋ | 402/520 [25:51<07:20, 3.74s/it] {'loss': 1.185, 'grad_norm': 0.0005313585971721359, 'learning_rate': 0.038778876672625986, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:51<07:20, 3.74s/it] 78%|███████▊ | 403/520 [25:55<07:16, 3.73s/it] {'loss': 1.2202, 'grad_norm': 0.0005742898366961363, 'learning_rate': 0.03815367526752516, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:55<07:16, 3.73s/it] 78%|███████▊ | 404/520 [25:59<07:12, 3.72s/it] {'loss': 1.1231, 'grad_norm': 0.0006913787139895212, 'learning_rate': 0.03753281955483985, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:59<07:12, 3.72s/it] 78%|███████▊ | 405/520 [26:02<07:08, 3.72s/it] {'loss': 1.2266, 'grad_norm': 0.0005238100174378445, 'learning_rate': 0.036916333657383026, 'epoch': 0.78} + 78%|███████▊ | 405/520 [26:02<07:08, 3.72s/it] 78%|███████▊ | 406/520 [26:06<07:03, 3.72s/it] {'loss': 1.1643, 'grad_norm': 0.0006492528112936115, 'learning_rate': 0.03630424152818203, 'epoch': 0.78} + 78%|███████▊ | 406/520 [26:06<07:03, 3.72s/it] 78%|███████▊ | 407/520 [26:10<07:01, 3.73s/it] {'loss': 1.3087, 'grad_norm': 0.0005594046789344513, 'learning_rate': 0.035696566949548376, 'epoch': 0.78} + 78%|███████▊ | 407/520 [26:10<07:01, 3.73s/it] 78%|███████▊ | 408/520 [26:14<06:57, 3.73s/it] {'loss': 1.2104, 'grad_norm': 0.0006132174811981858, 'learning_rate': 0.03509333353215331, 'epoch': 0.78} + 78%|███████▊ | 408/520 [26:14<06:57, 3.73s/it] 79%|███████▊ | 409/520 [26:17<06:52, 3.71s/it] {'loss': 1.3341, 'grad_norm': 0.0006596220672560982, 'learning_rate': 0.03449456471411058, 'epoch': 0.79} + 79%|███████▊ | 409/520 [26:17<06:52, 3.71s/it] 79%|███████▉ | 410/520 [26:21<06:52, 3.75s/it] {'loss': 1.0589, 'grad_norm': 0.0005323372893740229, 'learning_rate': 0.03390028376006589, 'epoch': 0.79} + 79%|███████▉ | 410/520 [26:21<06:52, 3.75s/it] 79%|███████▉ | 411/520 [26:25<06:57, 3.83s/it] {'loss': 1.3066, 'grad_norm': 0.0006304083606315132, 'learning_rate': 0.03331051376029279, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:25<06:57, 3.83s/it] 79%|███████▉ | 412/520 [26:29<06:55, 3.85s/it] {'loss': 1.2183, 'grad_norm': 0.000544056881153443, 'learning_rate': 0.032725277629795525, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:29<06:55, 3.85s/it] 79%|███████▉ | 413/520 [26:33<06:51, 3.85s/it] {'loss': 1.2533, 'grad_norm': 0.0005689770105878345, 
'learning_rate': 0.03214459810741897, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:33<06:51, 3.85s/it] 80%|███████▉ | 414/520 [26:37<06:43, 3.81s/it] {'loss': 1.0491, 'grad_norm': 0.00048048651012426116, 'learning_rate': 0.0315684977549647, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:37<06:43, 3.81s/it] 80%|███████▉ | 415/520 [26:40<06:37, 3.78s/it] {'loss': 1.1982, 'grad_norm': 0.00049352892083536, 'learning_rate': 0.03099699895631474, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:40<06:37, 3.78s/it] 80%|████████ | 416/520 [26:44<06:31, 3.77s/it] {'loss': 1.1174, 'grad_norm': 0.0005931359592237449, 'learning_rate': 0.030430123916561674, 'epoch': 0.8} + 80%|████████ | 416/520 [26:44<06:31, 3.77s/it] 80%|████████ | 417/520 [26:48<06:26, 3.75s/it] {'loss': 1.271, 'grad_norm': 0.0005434450114966278, 'learning_rate': 0.02986789466114582, 'epoch': 0.8} + 80%|████████ | 417/520 [26:48<06:26, 3.75s/it] 80%|████████ | 418/520 [26:52<06:20, 3.73s/it] {'loss': 1.2624, 'grad_norm': 0.0004935148295811374, 'learning_rate': 0.029310333034999747, 'epoch': 0.8} + 80%|████████ | 418/520 [26:52<06:20, 3.73s/it] 81%|████████ | 419/520 [26:55<06:16, 3.73s/it] {'loss': 1.2488, 'grad_norm': 0.0005727204300083266, 'learning_rate': 0.028757460701699215, 'epoch': 0.81} + 81%|████████ | 419/520 [26:55<06:16, 3.73s/it] 81%|████████ | 420/520 [26:59<06:11, 3.72s/it] {'loss': 1.1405, 'grad_norm': 0.0005641307343647141, 'learning_rate': 0.028209299142621522, 'epoch': 0.81} + 81%|████████ | 420/520 [26:59<06:11, 3.72s/it] 81%|████████ | 421/520 [27:03<06:07, 3.71s/it] {'loss': 1.0746, 'grad_norm': 0.0005632470046837393, 'learning_rate': 0.027665869656110974, 'epoch': 0.81} + 81%|████████ | 421/520 [27:03<06:07, 3.71s/it] 81%|████████ | 422/520 [27:06<06:03, 3.71s/it] {'loss': 1.2044, 'grad_norm': 0.0005650822246252323, 'learning_rate': 0.027127193356651213, 'epoch': 0.81} + 81%|████████ | 422/520 [27:06<06:03, 3.71s/it] 81%|████████▏ | 423/520 [27:10<05:59, 3.70s/it] {'loss': 1.1666, 'grad_norm': 0.0005709044671140285, 'learning_rate': 0.026593291174044995, 'epoch': 0.81} + 81%|████████▏ | 423/520 [27:10<05:59, 3.70s/it] 82%|████████▏ | 424/520 [27:14<05:58, 3.74s/it] {'loss': 1.3401, 'grad_norm': 0.0005256011825667661, 'learning_rate': 0.026064183852600797, 'epoch': 0.82} + 82%|████████▏ | 424/520 [27:14<05:58, 3.74s/it] 82%|████████▏ | 425/520 [27:18<05:55, 3.74s/it] {'loss': 1.1941, 'grad_norm': 0.0005393087098401285, 'learning_rate': 0.025539891950326875, 'epoch': 0.82} + 82%|████████▏ | 425/520 [27:18<05:55, 3.74s/it] 82%|████████▏ | 426/520 [27:21<05:50, 3.72s/it] {'loss': 1.2297, 'grad_norm': 0.0006923253154272786, 'learning_rate': 0.025020435838132675, 'epoch': 0.82} + 82%|████████▏ | 426/520 [27:21<05:50, 3.72s/it] 82%|████████▏ | 427/520 [27:25<05:48, 3.74s/it] {'loss': 1.1268, 'grad_norm': 0.0005102293305440904, 'learning_rate': 0.024505835699037037, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:25<05:48, 3.74s/it] 82%|████████▏ | 428/520 [27:29<05:42, 3.72s/it] {'loss': 1.1103, 'grad_norm': 0.0005767903838222629, 'learning_rate': 0.02399611152738429, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:29<05:42, 3.72s/it] 82%|████████▎ | 429/520 [27:33<05:40, 3.74s/it] {'loss': 1.212, 'grad_norm': 0.0005446244526074713, 'learning_rate': 0.023491283128067173, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:33<05:40, 3.74s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:36<05:37, 3.75s/it] {'loss': 1.2089, 'grad_norm': 0.000515630205948715, 'learning_rate': 0.02299137011575738, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:36<05:37, 3.75s/it] 83%|████████▎ | 431/520 [27:40<05:32, 3.73s/it] {'loss': 1.2251, 'grad_norm': 0.0005519082746296263, 'learning_rate': 0.02249639191414363, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:40<05:32, 3.73s/it] 83%|████████▎ | 432/520 [27:44<05:28, 3.73s/it] {'loss': 1.1168, 'grad_norm': 0.0005247280190125458, 'learning_rate': 0.02200636775517666, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:44<05:28, 3.73s/it] 83%|████████▎ | 433/520 [27:47<05:23, 3.72s/it] {'loss': 1.252, 'grad_norm': 0.0005218393803287393, 'learning_rate': 0.0215213166783223, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:47<05:23, 3.72s/it] 83%|████████▎ | 434/520 [27:51<05:20, 3.73s/it] {'loss': 0.9947, 'grad_norm': 0.0005231096011614456, 'learning_rate': 0.021041257529821455, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:51<05:20, 3.73s/it] 84%|████████▎ | 435/520 [27:55<05:15, 3.72s/it] {'loss': 1.2924, 'grad_norm': 0.0006583926520221763, 'learning_rate': 0.020566208961958043, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:55<05:15, 3.72s/it] 84%|████████▍ | 436/520 [27:59<05:11, 3.71s/it] {'loss': 1.0815, 'grad_norm': 0.0005537886536547573, 'learning_rate': 0.020096189432334193, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:59<05:11, 3.71s/it] 84%|████████▍ | 437/520 [28:02<05:07, 3.71s/it] {'loss': 1.3155, 'grad_norm': 0.0005638513090641926, 'learning_rate': 0.01963121720315304, 'epoch': 0.84} + 84%|████████▍ | 437/520 [28:02<05:07, 3.71s/it] 84%|████████▍ | 438/520 [28:06<05:03, 3.70s/it] {'loss': 1.1151, 'grad_norm': 0.0005346879104499501, 'learning_rate': 0.0191713103405092, 'epoch': 0.84} + 84%|████████▍ | 438/520 [28:06<05:03, 3.70s/it] 84%|████████▍ | 439/520 [28:10<04:58, 3.69s/it] {'loss': 1.196, 'grad_norm': 0.00046241645160719776, 'learning_rate': 0.018716486713686947, 'epoch': 0.84} + 84%|████████▍ | 439/520 [28:10<04:58, 3.69s/it] 85%|████████▍ | 440/520 [28:13<04:55, 3.69s/it] {'loss': 1.1702, 'grad_norm': 0.0005761191916223814, 'learning_rate': 0.0182667639944657, 'epoch': 0.85} + 85%|████████▍ | 440/520 [28:13<04:55, 3.69s/it] 85%|████████▍ | 441/520 [28:17<04:52, 3.71s/it] {'loss': 1.2365, 'grad_norm': 0.0005404739371726959, 'learning_rate': 0.017822159656433637, 'epoch': 0.85} + 85%|████████▍ | 441/520 [28:17<04:52, 3.71s/it] 85%|████████▌ | 442/520 [28:21<04:48, 3.70s/it] {'loss': 1.2288, 'grad_norm': 0.0006101126317335493, 'learning_rate': 0.01738269097430855, 'epoch': 0.85} + 85%|████████▌ | 442/520 [28:21<04:48, 3.70s/it] 85%|████████▌ | 443/520 [28:24<04:45, 3.71s/it] {'loss': 1.2414, 'grad_norm': 0.0005271793675051035, 'learning_rate': 0.01694837502326674, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:24<04:45, 3.71s/it] 85%|████████▌ | 444/520 [28:28<04:42, 3.71s/it] {'loss': 1.2025, 'grad_norm': 0.0005077699217105925, 'learning_rate': 0.016519228678279717, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:28<04:42, 3.71s/it] 86%|████████▌ | 445/520 [28:32<04:39, 3.73s/it] {'loss': 1.1331, 'grad_norm': 0.0005129497456565839, 'learning_rate': 0.0160952686134583, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:32<04:39, 3.73s/it] 86%|████████▌ | 446/520 [28:36<04:36, 3.74s/it] {'loss': 1.2996, 'grad_norm': 0.0005012382949649386, 'learning_rate': 0.01567651130140486, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:36<04:36, 3.74s/it] 86%|████████▌ 
| 447/520 [28:39<04:32, 3.73s/it] {'loss': 1.2109, 'grad_norm': 0.0005320992238637937, 'learning_rate': 0.015262973012573393, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:39<04:32, 3.73s/it] 86%|████████▌ | 448/520 [28:43<04:28, 3.74s/it] {'loss': 1.1953, 'grad_norm': 0.0006381852433164365, 'learning_rate': 0.014854669814637143, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:43<04:28, 3.74s/it] 86%|████████▋ | 449/520 [28:47<04:25, 3.74s/it] {'loss': 1.267, 'grad_norm': 0.0005684521716435078, 'learning_rate': 0.014451617571864528, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:47<04:25, 3.74s/it] 87%|████████▋ | 450/520 [28:51<04:20, 3.72s/it] {'loss': 1.2308, 'grad_norm': 0.000550676266087708, 'learning_rate': 0.014053831944502508, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:51<04:20, 3.72s/it] 87%|████████▋ | 451/520 [28:54<04:16, 3.72s/it] {'loss': 1.2353, 'grad_norm': 0.0005380038478223552, 'learning_rate': 0.013661328388168359, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:54<04:16, 3.72s/it] 87%|████████▋ | 452/520 [28:58<04:12, 3.72s/it] {'loss': 1.2988, 'grad_norm': 0.0005215919777393287, 'learning_rate': 0.013274122153249029, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:58<04:12, 3.72s/it] 87%|████████▋ | 453/520 [29:02<04:08, 3.71s/it] {'loss': 1.28, 'grad_norm': 0.0006047745544946509, 'learning_rate': 0.01289222828430855, 'epoch': 0.87} + 87%|████████▋ | 453/520 [29:02<04:08, 3.71s/it] 87%|████████▋ | 454/520 [29:05<04:05, 3.72s/it] {'loss': 1.1368, 'grad_norm': 0.000532260017048182, 'learning_rate': 0.01251566161950357, 'epoch': 0.87} + 87%|████████▋ | 454/520 [29:05<04:05, 3.72s/it] 88%|████████▊ | 455/520 [29:09<04:00, 3.70s/it] {'loss': 1.2816, 'grad_norm': 0.0005581266844920126, 'learning_rate': 0.012144436790006902, 'epoch': 0.88} + 88%|████████▊ | 455/520 [29:09<04:00, 3.70s/it] 88%|████████▊ | 456/520 [29:13<03:56, 3.69s/it] {'loss': 1.1993, 'grad_norm': 0.0005467433693362934, 'learning_rate': 0.01177856821943884, 'epoch': 0.88} + 88%|████████▊ | 456/520 [29:13<03:56, 3.69s/it] 88%|████████▊ | 457/520 [29:16<03:53, 3.70s/it] {'loss': 1.2118, 'grad_norm': 0.0005446318014914927, 'learning_rate': 0.011418070123306989, 'epoch': 0.88} + 88%|████████▊ | 457/520 [29:16<03:53, 3.70s/it] 88%|████████▊ | 458/520 [29:20<03:49, 3.70s/it] {'loss': 1.3404, 'grad_norm': 0.0006202523468768707, 'learning_rate': 0.011062956508453703, 'epoch': 0.88} + 88%|████████▊ | 458/520 [29:20<03:49, 3.70s/it] 88%|████████▊ | 459/520 [29:24<03:47, 3.72s/it] {'loss': 1.2629, 'grad_norm': 0.0005446323592798832, 'learning_rate': 0.010713241172511967, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:24<03:47, 3.72s/it] 88%|████████▊ | 460/520 [29:28<03:43, 3.72s/it] {'loss': 1.1513, 'grad_norm': 0.0005450221953321814, 'learning_rate': 0.01036893770336938, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:28<03:43, 3.72s/it] 89%|████████▊ | 461/520 [29:31<03:39, 3.72s/it] {'loss': 1.2977, 'grad_norm': 0.0004957497372448733, 'learning_rate': 0.010030059478640024, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:31<03:39, 3.72s/it] 89%|████████▉ | 462/520 [29:35<03:35, 3.72s/it] {'loss': 1.3481, 'grad_norm': 0.0005492233578452967, 'learning_rate': 0.009696619665144901, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:35<03:35, 3.72s/it] 89%|████████▉ | 463/520 [29:39<03:30, 3.70s/it] {'loss': 1.1173, 'grad_norm': 0.0006182684778090482, 'learning_rate': 0.009368631218400135, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:39<03:30, 3.70s/it] 89%|████████▉ | 464/520 [29:43<03:29, 3.73s/it] {'loss': 1.2502, 'grad_norm': 
0.0005752984953973459, 'learning_rate': 0.009046106882113752, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:43<03:29, 3.73s/it] 89%|████████▉ | 465/520 [29:46<03:28, 3.79s/it] {'loss': 1.3558, 'grad_norm': 0.0005692878196908992, 'learning_rate': 0.00872905918769048, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:46<03:28, 3.79s/it] 90%|████████▉ | 466/520 [29:50<03:27, 3.84s/it] {'loss': 1.2472, 'grad_norm': 0.0005086376573164805, 'learning_rate': 0.008417500453744864, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:50<03:27, 3.84s/it] 90%|████████▉ | 467/520 [29:54<03:25, 3.88s/it] {'loss': 1.2302, 'grad_norm': 0.0005209570833635794, 'learning_rate': 0.008111442785622596, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:54<03:25, 3.88s/it] 90%|█████████ | 468/520 [29:58<03:22, 3.89s/it] {'loss': 1.2221, 'grad_norm': 0.0006137912876908439, 'learning_rate': 0.0078108980749302444, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:58<03:22, 3.89s/it] 90%|█████████ | 469/520 [30:02<03:19, 3.91s/it] {'loss': 1.2777, 'grad_norm': 0.0006116102055657743, 'learning_rate': 0.0075158779990731, 'epoch': 0.9} + 90%|█████████ | 469/520 [30:02<03:19, 3.91s/it] 90%|█████████ | 470/520 [30:06<03:15, 3.91s/it] {'loss': 1.1491, 'grad_norm': 0.0005026861095020056, 'learning_rate': 0.007226394020801646, 'epoch': 0.9} + 90%|█████████ | 470/520 [30:06<03:15, 3.91s/it] 91%|█████████ | 471/520 [30:10<03:11, 3.91s/it] {'loss': 1.1888, 'grad_norm': 0.0005826765431464234, 'learning_rate': 0.006942457387765976, 'epoch': 0.91} + 91%|█████████ | 471/520 [30:10<03:11, 3.91s/it] 91%|█████████ | 472/520 [30:14<03:08, 3.92s/it] {'loss': 1.1455, 'grad_norm': 0.0005107991677500855, 'learning_rate': 0.0066640791320788815, 'epoch': 0.91} + 91%|█████████ | 472/520 [30:14<03:08, 3.92s/it] 91%|█████████ | 473/520 [30:18<03:04, 3.93s/it] {'loss': 1.2167, 'grad_norm': 0.000558096027337134, 'learning_rate': 0.006391270069887289, 'epoch': 0.91} + 91%|█████████ | 473/520 [30:18<03:04, 3.93s/it] 91%|█████████ | 474/520 [30:22<02:59, 3.91s/it] {'loss': 1.2693, 'grad_norm': 0.0005268281101312014, 'learning_rate': 0.006124040800951835, 'epoch': 0.91} + 91%|█████████ | 474/520 [30:22<02:59, 3.91s/it] 91%|█████████▏| 475/520 [30:26<02:53, 3.87s/it] {'loss': 1.1772, 'grad_norm': 0.0005187276572830328, 'learning_rate': 0.005862401708235076, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:26<02:53, 3.87s/it] 92%|█████████▏| 476/520 [30:29<02:48, 3.83s/it] {'loss': 1.2004, 'grad_norm': 0.000557615251794023, 'learning_rate': 0.0056063629574981955, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:29<02:48, 3.83s/it] 92%|█████████▏| 477/520 [30:33<02:43, 3.80s/it] {'loss': 1.195, 'grad_norm': 0.000731504207730939, 'learning_rate': 0.00535593449690585, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:33<02:43, 3.80s/it] 92%|█████████▏| 478/520 [30:37<02:38, 3.77s/it] {'loss': 1.1318, 'grad_norm': 0.0005213332196824825, 'learning_rate': 0.00511112605663977, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:37<02:38, 3.77s/it] 92%|█████████▏| 479/520 [30:41<02:33, 3.75s/it] {'loss': 1.2431, 'grad_norm': 0.0005880246398866401, 'learning_rate': 0.004871947148520584, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:41<02:33, 3.75s/it] 92%|█████████▏| 480/520 [30:44<02:29, 3.73s/it] {'loss': 1.2532, 'grad_norm': 0.0005270853905272142, 'learning_rate': 0.004638407065638323, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:44<02:29, 3.73s/it] 92%|█████████▎| 481/520 [30:48<02:27, 3.79s/it] {'loss': 1.2533, 'grad_norm': 0.0005047145715263915, 'learning_rate': 0.004410514881991357, 'epoch': 0.93} + 
92%|█████████▎| 481/520 [30:48<02:27, 3.79s/it] 93%|█████████▎| 482/520 [30:52<02:24, 3.81s/it] {'loss': 1.2676, 'grad_norm': 0.0005271647158807165, 'learning_rate': 0.004188279452133825, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:52<02:24, 3.81s/it] 93%|█████████▎| 483/520 [30:56<02:21, 3.81s/it] {'loss': 1.2122, 'grad_norm': 0.0005499929263635626, 'learning_rate': 0.003971709410831498, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:56<02:21, 3.81s/it] 93%|█████████▎| 484/520 [31:00<02:16, 3.78s/it] {'loss': 1.2192, 'grad_norm': 0.0005597887174040409, 'learning_rate': 0.003760813172726457, 'epoch': 0.93} + 93%|█████████▎| 484/520 [31:00<02:16, 3.78s/it] 93%|█████████▎| 485/520 [31:03<02:11, 3.77s/it] {'loss': 1.1668, 'grad_norm': 0.0005227594893123322, 'learning_rate': 0.0035555989320099953, 'epoch': 0.93} + 93%|█████████▎| 485/520 [31:03<02:11, 3.77s/it] 93%|█████████▎| 486/520 [31:07<02:07, 3.74s/it] {'loss': 1.2918, 'grad_norm': 0.0005721144516734318, 'learning_rate': 0.003356074662104319, 'epoch': 0.93} + 93%|█████████▎| 486/520 [31:07<02:07, 3.74s/it] 94%|█████████▎| 487/520 [31:11<02:03, 3.73s/it] {'loss': 1.141, 'grad_norm': 0.0005269977216104061, 'learning_rate': 0.0031622481153527446, 'epoch': 0.94} + 94%|█████████▎| 487/520 [31:11<02:03, 3.73s/it] 94%|█████████▍| 488/520 [31:14<01:59, 3.73s/it] {'loss': 1.0853, 'grad_norm': 0.0005460097771246093, 'learning_rate': 0.0029741268227184256, 'epoch': 0.94} + 94%|█████████▍| 488/520 [31:14<01:59, 3.73s/it] 94%|█████████▍| 489/520 [31:18<01:54, 3.70s/it] {'loss': 1.2639, 'grad_norm': 0.0004927386428848035, 'learning_rate': 0.0027917180934918517, 'epoch': 0.94} + 94%|█████████▍| 489/520 [31:18<01:54, 3.70s/it] 94%|█████████▍| 490/520 [31:22<01:51, 3.70s/it] {'loss': 1.2103, 'grad_norm': 0.0005579507227905309, 'learning_rate': 0.002615029015006759, 'epoch': 0.94} + 94%|█████████▍| 490/520 [31:22<01:51, 3.70s/it] 94%|█████████▍| 491/520 [31:25<01:47, 3.71s/it] {'loss': 1.1731, 'grad_norm': 0.0005719106428562099, 'learning_rate': 0.0024440664523648014, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:25<01:47, 3.71s/it] 95%|█████████▍| 492/520 [31:29<01:43, 3.69s/it] {'loss': 1.2873, 'grad_norm': 0.0005796218119839116, 'learning_rate': 0.0022788370481687968, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:29<01:43, 3.69s/it] 95%|█████████▍| 493/520 [31:33<01:39, 3.68s/it] {'loss': 1.2888, 'grad_norm': 0.0005720584887781338, 'learning_rate': 0.002119347222264617, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:33<01:39, 3.68s/it] 95%|█████████▌| 494/520 [31:36<01:36, 3.70s/it] {'loss': 1.2207, 'grad_norm': 0.0005508904678174968, 'learning_rate': 0.0019656031714918366, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:36<01:36, 3.70s/it] 95%|█████████▌| 495/520 [31:40<01:32, 3.69s/it] {'loss': 1.1915, 'grad_norm': 0.000551678438552711, 'learning_rate': 0.0018176108694427928, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:40<01:32, 3.69s/it] 95%|█████████▌| 496/520 [31:44<01:29, 3.73s/it] {'loss': 1.1134, 'grad_norm': 0.0005342066912686522, 'learning_rate': 0.0016753760662307216, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:44<01:29, 3.73s/it] 96%|█████████▌| 497/520 [31:48<01:25, 3.74s/it] {'loss': 1.1942, 'grad_norm': 0.0005452679840837969, 'learning_rate': 0.001538904288266102, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:48<01:25, 3.74s/it] 96%|█████████▌| 498/520 [31:52<01:22, 3.76s/it] {'loss': 1.1928, 'grad_norm': 0.000575418128160864, 'learning_rate': 0.0014082008380420785, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:52<01:22, 3.76s/it] 
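The learning_rate column is consistent with transformers' cosine-with-warmup schedule. As a sanity check (a sketch, not project code): assuming 520 optimizer steps, warmup = ceil(0.03 * 520) = 16 steps, and a peak learning rate of 3e-1, the standard formula reproduces the logged values to the printed precision. Note that the trace matches a peak of 3e-1, not the 2e-1 in this file's header, which suggests output from the 3e-1 run named in the completion banner below was interleaved into this file.

import math

def cosine_lr(step, peak=0.3, total=520, warmup=16):
    # transformers get_cosine_schedule_with_warmup with num_cycles=0.5
    if step < warmup:
        return peak * step / max(1, warmup)
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(308))  # ~0.1129864,    logged 0.11298639034645594
print(cosine_lr(500))  # ~0.0011641,    logged 0.0011641190099741904
print(cosine_lr(519))  # ~2.914060e-06, logged 2.9140602692712123e-06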
96%|█████████▌| 499/520 [31:55<01:18, 3.75s/it] {'loss': 1.3433, 'grad_norm': 0.0005866566616497251, 'learning_rate': 0.0012832707939284427, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:55<01:18, 3.75s/it] 96%|█████████▌| 500/520 [31:59<01:14, 3.73s/it] {'loss': 1.3195, 'grad_norm': 0.0006046956054783096, 'learning_rate': 0.0011641190099741904, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:59<01:14, 3.73s/it] 96%|█████████▋| 501/520 [32:03<01:10, 3.71s/it] {'loss': 1.2495, 'grad_norm': 0.000577356349777013, 'learning_rate': 0.0010507501157190568, 'epoch': 0.96} + 96%|█████████▋| 501/520 [32:03<01:10, 3.71s/it] 97%|█████████▋| 502/520 [32:06<01:06, 3.70s/it] {'loss': 1.2222, 'grad_norm': 0.0005205210048610091, 'learning_rate': 0.0009431685160136093, 'epoch': 0.97} + 97%|█████████▋| 502/520 [32:06<01:06, 3.70s/it] 97%|█████████▋| 503/520 [32:10<01:02, 3.70s/it] {'loss': 1.2257, 'grad_norm': 0.0006183498486896358, 'learning_rate': 0.0008413783908480354, 'epoch': 0.97} + 97%|█████████▋| 503/520 [32:10<01:02, 3.70s/it] 97%|█████████▋| 504/520 [32:14<01:00, 3.80s/it] {'loss': 1.219, 'grad_norm': 0.0006093042303549783, 'learning_rate': 0.0007453836951897885, 'epoch': 0.97} + 97%|█████████▋| 504/520 [32:14<01:00, 3.80s/it] 97%|█████████▋| 505/520 [32:18<00:57, 3.84s/it] {'loss': 1.264, 'grad_norm': 0.0005556977711006343, 'learning_rate': 0.000655188158829928, 'epoch': 0.97} + 97%|█████████▋| 505/520 [32:18<00:57, 3.84s/it] 97%|█████████▋| 506/520 [32:22<00:53, 3.85s/it] {'loss': 1.1789, 'grad_norm': 0.000569571045369544, 'learning_rate': 0.0005707952862381682, 'epoch': 0.97} + 97%|█████████▋| 506/520 [32:22<00:53, 3.85s/it] 98%|█████████▊| 507/520 [32:26<00:50, 3.89s/it] {'loss': 1.3813, 'grad_norm': 0.0005350727904594297, 'learning_rate': 0.0004922083564267377, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:26<00:50, 3.89s/it] 98%|█████████▊| 508/520 [32:30<00:46, 3.89s/it] {'loss': 1.2925, 'grad_norm': 0.0005589545451360299, 'learning_rate': 0.0004194304228229806, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:30<00:46, 3.89s/it] 98%|█████████▊| 509/520 [32:34<00:42, 3.90s/it] {'loss': 1.2667, 'grad_norm': 0.0005246036862369504, 'learning_rate': 0.00035246431315066884, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:34<00:42, 3.90s/it] 98%|█████████▊| 510/520 [32:38<00:39, 3.91s/it] {'loss': 1.2198, 'grad_norm': 0.0005329034726341634, 'learning_rate': 0.00029131262932022284, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:38<00:39, 3.91s/it] 98%|█████████▊| 511/520 [32:41<00:35, 3.92s/it] {'loss': 1.1839, 'grad_norm': 0.0005595808058960428, 'learning_rate': 0.0002359777473275093, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:41<00:35, 3.92s/it] 98%|█████████▊| 512/520 [32:46<00:31, 3.96s/it] {'loss': 1.0739, 'grad_norm': 0.0005529966697836435, 'learning_rate': 0.00018646181716164834, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:46<00:31, 3.96s/it] 99%|█████████▊| 513/520 [32:49<00:27, 3.94s/it] {'loss': 1.2749, 'grad_norm': 0.0006309396514378828, 'learning_rate': 0.00014276676272133025, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:49<00:27, 3.94s/it] 99%|█████████▉| 514/520 [32:53<00:23, 3.94s/it] {'loss': 1.2476, 'grad_norm': 0.0005324614427590008, 'learning_rate': 0.00010489428174020876, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:53<00:23, 3.94s/it] 99%|█████████▉| 515/520 [32:57<00:19, 3.94s/it] {'loss': 1.307, 'grad_norm': 0.0006627586955346073, 'learning_rate': 7.284584572085362e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:57<00:19, 3.94s/it] 99%|█████████▉| 516/520 [33:01<00:15, 3.92s/it] 
{'loss': 1.2061, 'grad_norm': 0.0005932641295219582, 'learning_rate': 4.662269987756318e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [33:01<00:15, 3.92s/it] 99%|█████████▉| 517/520 [33:05<00:11, 3.89s/it] {'loss': 1.2789, 'grad_norm': 0.0005489247778512178, 'learning_rate': 2.6225863088036316e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [33:05<00:11, 3.89s/it] 100%|█████████▉| 518/520 [33:09<00:07, 3.82s/it] {'loss': 1.2187, 'grad_norm': 0.0006320779963779943, 'learning_rate': 1.1656127853770792e-05, 'epoch': 1.0} + 100%|█████████▉| 518/520 [33:09<00:07, 3.82s/it] 100%|█████████▉| 519/520 [33:12<00:03, 3.76s/it] {'loss': 1.2383, 'grad_norm': 0.0005815166608795439, 'learning_rate': 2.9140602692712123e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [33:12<00:03, 3.76s/it] 100%|██████████| 520/520 [33:17<00:00, 4.01s/it] {'loss': 1.2601, 'grad_norm': 0.0005318607288778074, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [33:17<00:00, 4.01s/it] {'train_runtime': 1997.388, 'train_samples_per_second': 33.308, 'train_steps_per_second': 0.26, 'train_loss': 1.2803244152894386, 'epoch': 1.0} + 100%|██████████| 520/520 [33:17<00:00, 4.01s/it] 100%|██████████| 520/520 [33:17<00:00, 3.84s/it] +[2025-10-10 10:28:55,644] [INFO] [launch.py:348:main] Process 1964837 exits successfully. +[2025-10-10 10:28:55,645] [INFO] [launch.py:348:main] Process 1964833 exits successfully. +[2025-10-10 10:28:55,645] [INFO] [launch.py:348:main] Process 1964836 exits successfully. +[2025-10-10 10:28:56,647] [INFO] [launch.py:348:main] Process 1964838 exits successfully. +[2025-10-10 10:28:56,647] [INFO] [launch.py:348:main] Process 1964839 exits successfully. +[2025-10-10 10:28:56,648] [INFO] [launch.py:348:main] Process 1964835 exits successfully. +[2025-10-10 10:28:56,648] [INFO] [launch.py:348:main] Process 1964834 exits successfully. +[2025-10-10 10:29:00,653] [INFO] [launch.py:348:main] Process 1964832 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251010_095404.log +Timestamp: 2025-10-10 10:29:03 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_060110.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_060110.log new file mode 100644 index 0000000000000000000000000000000000000000..66d3b2ad0cf72cdfdd22fe216100289d482c5d73 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_060110.log @@ -0,0 +1,1167 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_060110.log +Timestamp: 2025-10-10 06:01:10 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:01:13,517] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:01:16,136] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 06:01:16,138] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json --image_folder /nfs/ywang29/tinyLLaVA/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 5 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 5 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 06:01:18,796] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 06:01:19,839] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 06:01:19,839] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 06:01:19,840] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 06:01:19,840] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 06:01:19,840] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 06:01:19,840] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 06:01:19,840] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 06:01:19,842] [INFO] [launch.py:253:main] process 1783227 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:01:19,844] [INFO] [launch.py:253:main] process
1783228 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:01:19,846] [INFO] [launch.py:253:main] process 1783229 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:01:19,848] [INFO] [launch.py:253:main] process 1783230 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', 
'--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:01:19,850] [INFO] [launch.py:253:main] process 1783231 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:01:19,852] [INFO] [launch.py:253:main] 
process 1783232 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:01:19,854] [INFO] [launch.py:253:main] process 1783233 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 06:01:19,856] [INFO] [launch.py:253:main] process 1783234 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', 
'--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import]
+[2025-10-10 06:01:26,335] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:01:26,680] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:01:26,693] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:01:26,727] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:01:26,757] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:01:26,757] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:01:26,802] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:01:26,804] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:01:26,809] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 06:01:27,076] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:01:27,097] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:01:27,097] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 06:01:27,162] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:01:27,162] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:01:27,204] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:01:27,205] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 06:01:27,210] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
"intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. 
It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1783227:1783227 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783227:1783227 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783227:1783227 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1783227:1783227 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1783227:1783227 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1783227:1783227 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1783228:1783228 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1783228:1783228 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783228:1783228 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783228:1783228 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1783228:1783228 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1783228:1783228 [1] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test1-worker-0:1783229:1783229 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1783229:1783229 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783229:1783229 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783229:1783229 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1783229:1783229 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1783229:1783229 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1783233:1783233 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1783233:1783233 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783233:1783233 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783233:1783233 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1783233:1783233 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1783233:1783233 [6] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. 
Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1783230:1783230 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1783230:1783230 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783230:1783230 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783230:1783230 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1783230:1783230 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1783230:1783230 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1783234:1783234 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1783234:1783234 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783234:1783234 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783234:1783234 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1783234:1783234 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1783234:1783234 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1783231:1783231 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1783231:1783231 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783231:1783231 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783231:1783231 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1783231:1783231 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1783231:1783231 [4] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1783232:1783232 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1783232:1783232 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783232:1783232 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783232:1783232 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1783232:1783232 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1783232:1783232 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO ncclCommInitRank comm 0x563f9bb5b780 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x91770bdd8865f56a - Init START +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO ncclCommInitRank comm 0x5571630fa850 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x91770bdd8865f56a - Init START +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO ncclCommInitRank comm 0x562106417200 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x91770bdd8865f56a - Init START +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO ncclCommInitRank comm 0x559aa26cf630 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x91770bdd8865f56a - Init START +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO ncclCommInitRank comm 0x55b3c94b28e0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x91770bdd8865f56a - Init START +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO ncclCommInitRank comm 0x55d1cec991f0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x91770bdd8865f56a - Init START +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO ncclCommInitRank comm 0x561a228583a0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x91770bdd8865f56a - Init START +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO ncclCommInitRank comm 0x56063e753da0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x91770bdd8865f56a - Init START +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1783234:1784844 
[7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO comm 0x559aa26cf630 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO comm 0x55b3c94b28e0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO comm 0x55d1cec991f0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO comm 0x56063e753da0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO comm 0x562106417200 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO comm 0x561a228583a0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO comm 0x563f9bb5b780 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO comm 0x5571630fa850 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO P2P Chunksize set to 524288 
+ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 
5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL 
INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL 
INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL 
INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL 
INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL 
INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 
p2p channels per peer +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1783232:1784846 [5] NCCL INFO ncclCommInitRank comm 0x563f9bb5b780 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x91770bdd8865f56a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1783234:1784844 [7] NCCL INFO ncclCommInitRank comm 0x55b3c94b28e0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x91770bdd8865f56a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1783231:1784845 [4] NCCL INFO ncclCommInitRank comm 0x5571630fa850 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x91770bdd8865f56a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1783233:1784842 [6] NCCL INFO ncclCommInitRank comm 0x55d1cec991f0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x91770bdd8865f56a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1783227:1784839 [0] NCCL INFO ncclCommInitRank comm 0x561a228583a0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x91770bdd8865f56a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1783230:1784843 [3] NCCL INFO ncclCommInitRank comm 0x559aa26cf630 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x91770bdd8865f56a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1783229:1784841 [2] NCCL INFO ncclCommInitRank comm 0x56063e753da0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x91770bdd8865f56a - Init COMPLETE +ywang29-vrdb-test1-worker-0:1783228:1784840 [1] NCCL INFO ncclCommInitRank comm 0x562106417200 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x91770bdd8865f56a - Init COMPLETE +[2025-10-10 06:02:15,314] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores',
'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 
'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 06:02:17,072] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+[All 8 ranks then raised the same exception; their interleaved, garbled tracebacks are collapsed into the single clean copy below.]
+Traceback (most recent call last):
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 193, in <module>
+    train()
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/train/train.py", line 172, in train
+    data_module = make_supervised_data_module(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 142, in make_supervised_data_module
+    train_dataset = LazySupervisedDataset(tokenizer=tokenizer,
+  File "/nfs/ywang29/TinyLLaVA/tinyllava/data/dataset.py", line 30, in __init__
+    list_data_dict = json.load(open(data_path, "r"))
+FileNotFoundError: [Errno 2] No such file or directory: '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json'
+[2025-10-10 06:02:19,925] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1783227
+[2025-10-10 06:02:19,927] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1783228
+[2025-10-10 06:02:20,140] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1783229
+[2025-10-10 06:02:20,142] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1783230
+[2025-10-10 06:02:20,143] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1783231
+[2025-10-10 06:02:20,144] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1783232
+[2025-10-10 06:02:20,145] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1783233
+[2025-10-10 06:02:20,145] [INFO] [launch.py:316:sigkill_handler] Killing subprocess 1783234
+[2025-10-10 06:02:20,146] [ERROR] [launch.py:322:sigkill_handler] ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/nfs/ywang29/tinyLLaVA/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/nfs/ywang29/tinyLLaVA/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower',
'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] exits with return code = 1 +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_060110.log +Timestamp: 2025-10-10 06:02:21 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_072428.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_072428.log new file mode 100644 index 0000000000000000000000000000000000000000..72289ff224bf712249521ad181dddf7f629321cf --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_072428.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_072428.log +Timestamp: 2025-10-10 07:24:28 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 07:24:31,210] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:34,063] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 07:24:34,065] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 5 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 5 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
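Note: the rerun above switches --data_path from the missing NFS location to /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json; the FileNotFoundError in the previous run was raised from LazySupervisedDataset.__init__ calling json.load(open(data_path, "r")) on every rank at once. A minimal fail-fast guard for that step might look like the sketch below (load_annotations is a hypothetical helper, not part of the TinyLLaVA source):

```python
import json
import os

def load_annotations(data_path: str):
    """Load a llava_v1_5_mix665k-style annotation JSON, failing fast with a
    readable message instead of a mid-launch traceback on every rank."""
    if not os.path.isfile(data_path):
        # Surfacing the bad path before DeepSpeed spawns 8 ranks avoids the
        # interleaved per-rank tracebacks seen in the previous log.
        raise FileNotFoundError(
            f"--data_path does not exist on this host: {data_path!r}. "
            "Check that the NFS/S3 mount is attached on every worker."
        )
    with open(data_path, "r") as f:
        return json.load(f)

# Example, using the path from the launch command above:
# list_data_dict = load_annotations(
#     "/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json")
```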
+ import pynvml # type: ignore[import] +[2025-10-10 07:24:36,718] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:37,765] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 07:24:37,765] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 07:24:37,765] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 07:24:37,765] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 07:24:37,766] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 07:24:37,766] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 07:24:37,766] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 07:24:37,768] [INFO] [launch.py:253:main] process 1832681 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:37,770] [INFO] [launch.py:253:main]
process 1832682 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:37,772] [INFO] [launch.py:253:main] process 1832683 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', 
'--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:37,774] [INFO] [launch.py:253:main] process 1832684 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', 
'--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:37,778] [INFO] [launch.py:253:main] process 1832685 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 
07:24:37,782] [INFO] [launch.py:253:main] process 1832686 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:37,787] [INFO] [launch.py:253:main] process 1832687 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 07:24:37,790] [INFO] [launch.py:253:main] process 1832688 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 07:24:44,438] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:44,607] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:44,638] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:44,673] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:44,673] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:44,673] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:44,673] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:44,685] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 07:24:44,845] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 07:24:45,016] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 07:24:45,016] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 07:24:45,046] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 07:24:45,078] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 07:24:45,079] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 07:24:45,080] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 07:24:45,081] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 07:24:45,089] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
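Note: the TinyLlavaConfig above sets mask_type "soft" with temperature 0.5 for both the LLM and the connector, and the earlier failed run logged connector score means near the --init_mean_connector value of 3.0. One common way to realize such a soft, differentiable weight mask is sigmoid(scores / temperature); the PyTorch sketch below assumes that form and is illustrative only, not the repo's actual masking code:

```python
import torch
import torch.nn as nn

class SoftMaskedLinear(nn.Module):
    """Soft weight mask in the style suggested by the config above
    (mask_type "soft", temperature 0.5, scores initialized near 3.0).
    An illustrative guess, not the TinyLLaVA implementation."""
    def __init__(self, linear: nn.Linear, init_mean: float = 3.0,
                 temperature: float = 0.5):
        super().__init__()
        self.linear = linear
        self.temperature = temperature
        # Learnable per-weight scores; "Pre-training init ... Mean=3.000005"
        # in the log is consistent with init_mean=3.0 plus small noise.
        self.scores = nn.Parameter(torch.full_like(linear.weight, init_mean))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return nn.functional.linear(x, self.linear.weight * mask,
                                    self.linear.bias)

# e.g. wrapping one connector projection (shapes from the config above):
# masked = SoftMaskedLinear(nn.Linear(1152, 896))
```

Under these assumptions, sigmoid(3.0 / 0.5) = sigmoid(6.0) ≈ 0.9975, so training would start from an almost fully unmasked network and gradually learn which weights to damp.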
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. 
Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1832681:1832681 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832681:1832681 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832681:1832681 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1832681:1832681 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1832681:1832681 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1832681:1832681 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:1832685:1832685 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1832685:1832685 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832685:1832685 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832688:1832688 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1832688:1832688 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832688:1832688 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832685:1832685 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1832685:1832685 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1832685:1832685 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1832688:1832688 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1832688:1832688 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1832688:1832688 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1832684:1832684 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1832684:1832684 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832684:1832684 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832684:1832684 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1832684:1832684 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1832684:1832684 [3] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1832687:1832687 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1832687:1832687 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832687:1832687 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832687:1832687 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1832687:1832687 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1832687:1832687 [6] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1832682:1832682 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1832682:1832682 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832682:1832682 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832682:1832682 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1832682:1832682 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1832682:1832682 [1] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1832686:1832686 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1832686:1832686 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832686:1832686 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832686:1832686 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1832686:1832686 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1832686:1832686 [5] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1832683:1832683 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1832683:1832683 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832683:1832683 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832683:1832683 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1832683:1832683 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1832683:1832683 [2] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO ncclCommInitRank comm 0x56462b12fbd0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xb8dcfa05dccdf99d - Init START +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO ncclCommInitRank comm 0x55ea7316dee0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xb8dcfa05dccdf99d - Init START +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO ncclCommInitRank comm 0x55e2d7dc19e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xb8dcfa05dccdf99d - Init START +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO ncclCommInitRank comm 0x55a56e0e4c60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xb8dcfa05dccdf99d - Init START +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO ncclCommInitRank comm 0x5565c1f5d460 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xb8dcfa05dccdf99d - Init START +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO ncclCommInitRank comm 0x562ccb1c0000 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xb8dcfa05dccdf99d - Init START +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO ncclCommInitRank comm 0x561e6cb3c730 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xb8dcfa05dccdf99d - Init START +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO ncclCommInitRank comm 0x562ad0c2dbf0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xb8dcfa05dccdf99d - Init START +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO 
Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO comm 0x562ccb1c0000 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO comm 0x55ea7316dee0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO comm 0x56462b12fbd0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO comm 0x562ad0c2dbf0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO comm 0x55e2d7dc19e0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO comm 0x561e6cb3c730 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO comm 0x55a56e0e4c60 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO comm 0x5565c1f5d460 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 
7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO P2P Chunksize set to 524288 
+ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL 
INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL 
INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL 
INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL 
INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL 
INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO 24 coll 
channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1832686:1834269 [5] NCCL INFO ncclCommInitRank comm 0x55a56e0e4c60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xb8dcfa05dccdf99d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1832687:1834267 [6] NCCL INFO ncclCommInitRank comm 0x55e2d7dc19e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xb8dcfa05dccdf99d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1832688:1834248 [7] NCCL INFO ncclCommInitRank comm 0x56462b12fbd0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xb8dcfa05dccdf99d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1832685:1834249 [4] NCCL INFO ncclCommInitRank comm 0x5565c1f5d460 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xb8dcfa05dccdf99d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1832682:1834268 [1] NCCL INFO ncclCommInitRank comm 0x561e6cb3c730 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xb8dcfa05dccdf99d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1832684:1834258 [3] NCCL INFO ncclCommInitRank comm 0x562ccb1c0000 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xb8dcfa05dccdf99d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1832683:1834270 [2] NCCL INFO ncclCommInitRank comm 0x55ea7316dee0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xb8dcfa05dccdf99d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1832681:1834247 [0] NCCL INFO ncclCommInitRank comm 0x562ad0c2dbf0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xb8dcfa05dccdf99d - Init COMPLETE +[2025-10-10 07:25:33,455] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 
'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 
'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 
'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 
'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 
'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 
'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 
'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 
'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 
'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 07:32:27,391] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
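The architecture dump below shows every Linear in the Qwen2 language model and in the MLP connector replaced by a SupermaskLinearSparsity_SoftForward_Normal module, and the "Pre-training init" lines that follow report each module's learnable scores tensor starting at a constant mean of 3.0. As a rough illustration only, here is a minimal sketch of how such a soft-supermask linear layer is commonly implemented; the class name, the way temperature enters the mask, and the initialization are assumptions based on the logged flags, not this repo's actual code:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Hypothetical sketch of a soft-supermask linear layer.

    A learnable score is attached to every weight; the forward pass gates
    the ordinary weight matrix with sigmoid(scores / temperature).
    """

    def __init__(self, in_features, out_features, bias=True,
                 temperature=0.3, init_mean=3.0):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One score per weight, initialized to a constant -- matching the
        # "scores: Mean=3.000000" lines in this log. These are the tensors
        # the Transformers warning above reports as "newly initialized",
        # since they do not exist in the pretrained Qwen2 checkpoint.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

    def forward(self, x):
        # Soft mask in (0, 1); with init_mean=3.0 and temperature=0.3 the
        # initial mask is sigmoid(10) ~= 1.0, i.e. effectively dense, and
        # training can then push individual entries toward 0 to prune them.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

# Example: the connector's first projection in the dump below is 1152 -> 896.
layer = SoftSupermaskLinear(1152, 896, bias=True)
out = layer(torch.randn(2, 1152))  # -> shape (2, 896)
```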
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores:
Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-10 07:32:45,978 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-10 07:32:45,986 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 
114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters 
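The per-weight scores initialized above (Mean around 3.0) and counted in this listing belong to the SupermaskLinearSparsity_SoftForward_Normal modules shown in the model print. That class's internals are not part of this log; the following is a minimal sketch of such a soft-forward masked linear layer, assuming a temperature-scaled sigmoid over per-weight scores (the class name, init mean, and layer shapes come from the log; the forward rule and the temperature default are assumptions):

```python
# Minimal sketch of a soft-forward "supermask" linear layer (assumed
# implementation, not the project's source). The pretrained weight is
# frozen; a same-shaped trainable `scores` tensor (logged above with
# Mean around 3.0 at init) is squashed through a temperature-scaled
# sigmoid into a soft mask that scales the weight on the forward pass.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=3.0, temperature=1.0):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features),
                                   requires_grad=False)  # frozen pretrained weight
        self.bias = nn.Parameter(torch.zeros(out_features),
                                 requires_grad=False) if bias else None
        # one trainable score per weight entry -> the score counts in this listing
        self.scores = nn.Parameter(torch.full((out_features, in_features),
                                              float(init_mean)))
        self.temperature = temperature

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)
```

Only the `scores` tensors are trainable here, which is consistent with the log reporting just the `.scores` entries under "Trainable Parameters" while the underlying weights stay frozen.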
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters 
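The score counts in this listing (which continues below with layer 23 and the connector) follow directly from the layer shapes in the model print, one score per weight entry. A quick cross-check, using the shapes logged above (hidden size 896, KV projection width 128, MLP intermediate size 4864, vision width 1152):

```python
# Cross-check of the logged score counts against the layer shapes above
# (one trainable score per frozen weight entry).
hidden, kv_out, inter, vit = 896, 128, 4864, 1152

q_or_o = hidden * hidden   # 802816, matches the q_proj/o_proj lines
k_or_v = hidden * kv_out   # 114688, matches the k_proj/v_proj lines
mlp    = hidden * inter    # 4358144, matches the gate/up/down_proj lines

per_layer = 2 * q_or_o + 2 * k_or_v + 3 * mlp   # 14909440 per decoder layer
connector = vit * hidden + hidden * hidden      # 1032192 + 802816

print(24 * per_layer + connector)  # 359661568 == "Total Trainable Parameters"
```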
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 
[21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
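In the NCCL "Trees" lines above, each bracketed index is a communication channel, and each entry has the form down0/down1/down2->rank->up: up to three tree children, the local rank, and its parent, with -1 meaning no link (so 1/-1/-1->0->-1 marks rank 0 as the root). A tiny illustrative parser for this notation (a hypothetical helper based on the log format, not part of NCCL or the project):

```python
# Hypothetical helper to decode one NCCL debug "Trees" entry such as
# "7/-1/-1->6->5": up to three children, the local rank, then the parent.
def parse_tree(entry: str):
    down, rank, up = entry.split("->")
    children = [int(d) for d in down.split("/") if int(d) != -1]
    parent = None if int(up) == -1 else int(up)
    return children, int(rank), parent

print(parse_tree("7/-1/-1->6->5"))   # ([7], 6, 5): rank 6, child 7, parent 5
print(parse_tree("1/-1/-1->0->-1"))  # ([1], 0, None): rank 0 is the tree root
```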
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL 
INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL 
INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL 
INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
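The "Connected all rings" lines above, and the per-channel P2P/CUMEM links between the eight local GPUs, are standard NCCL initialization output; it appears in the log because NCCL debug logging is enabled. A minimal sketch of how to get similar output from any PyTorch distributed job (assuming a torchrun-style multi-process launch; this is not the project's launcher):

```python
# Sketch: enable NCCL's topology logging before the communicator is created.
import os
import torch.distributed as dist

os.environ.setdefault("NCCL_DEBUG", "INFO")               # print rings/trees/channels
os.environ.setdefault("NCCL_DEBUG_SUBSYS", "INIT,GRAPH")  # optional: limit to setup logs
dist.init_process_group(backend="nccl")                   # one process per GPU
```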
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1832681:1839540 [0] NCCL INFO ncclCommInitRank comm 0x7f497406b330 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x8fb0d6aa03c0a7cc - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1832683:1839544 [2] NCCL INFO ncclCommInitRank comm 0x7f25ec06b020 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x8fb0d6aa03c0a7cc - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1832685:1839543 [4] NCCL INFO ncclCommInitRank comm 0x7f873406aa70 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x8fb0d6aa03c0a7cc - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1832687:1839541 [6] NCCL INFO ncclCommInitRank comm 0x7f989006b910 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x8fb0d6aa03c0a7cc - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1832686:1839542 [5] NCCL INFO ncclCommInitRank comm 0x7ff6fc06b010 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x8fb0d6aa03c0a7cc - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1832682:1839547 [1] NCCL INFO ncclCommInitRank comm 0x7f547006a8a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x8fb0d6aa03c0a7cc - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1832684:1839546 [3] NCCL INFO ncclCommInitRank comm 0x7efdf806ac10 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x8fb0d6aa03c0a7cc - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1832688:1839545 [7] NCCL INFO ncclCommInitRank comm 0x7f14c806a2a0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x8fb0d6aa03c0a7cc - Init COMPLETE
+ 0%| | 1/520 [00:14<2:02:42, 14.18s/it] {'loss': 2.0453, 'grad_norm': 0.004835224373788036, 'learning_rate': 0.3125, 'epoch': 0.0}
+ 0%| | 1/520 [00:14<2:02:42, 14.18s/it] 0%| | 2/520 [00:18<1:10:28, 8.16s/it] {'loss': 2.0549, 'grad_norm': 0.005249929419997777, 'learning_rate': 0.625, 'epoch': 0.0}
+ 0%| | 2/520 [00:18<1:10:28, 8.16s/it] 1%| | 3/520 [00:22<53:34, 6.22s/it] {'loss': 1.6757, 'grad_norm': 0.0017020618147032104, 'learning_rate': 0.9375, 'epoch': 0.01}
+ 1%| | 3/520 [00:22<53:34, 6.22s/it] 1%| | 4/520 [00:25<45:05, 5.24s/it] {'loss': 1.5659, 'grad_norm': 0.0016017715219483635, 'learning_rate': 1.25, 'epoch': 0.01}
+ 1%| | 4/520 [00:25<45:05, 5.24s/it] 1%| | 5/520 [00:29<40:46, 4.75s/it] {'loss': 1.97, 'grad_norm': 0.009310899501572363, 'learning_rate': 1.5625, 'epoch': 0.01}
+ 1%| | 5/520 [00:29<40:46, 4.75s/it] 1%| | 6/520 [00:33<38:04, 4.45s/it] {'loss': 3.5814, 'grad_norm': 0.06921843651592846, 'learning_rate': 1.875, 'epoch': 0.01}
+ 1%| | 6/520 [00:33<38:04, 4.45s/it] 1%|▏ | 7/520 [00:37<36:21, 4.25s/it] {'loss': 16.4568, 'grad_norm': 1.430434675434729, 'learning_rate': 2.1875, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:37<36:21, 4.25s/it] 2%|▏ | 8/520 [00:41<36:45, 4.31s/it] {'loss': 18.901, 'grad_norm': 0.9605423862690351, 'learning_rate': 2.5, 'epoch': 0.02}
+ 2%|▏ | 8/520 [00:41<36:45, 4.31s/it] 2%|▏ | 9/520 [00:46<36:36, 4.30s/it] {'loss': 11.8876, 'grad_norm': 1.2339733532391046, 'learning_rate': 2.8125, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:46<36:36, 4.30s/it] 2%|▏ | 10/520 [00:49<35:03, 4.13s/it] {'loss': 21.0653, 'grad_norm': 0.34475158627747815, 'learning_rate': 3.125, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:49<35:03, 4.13s/it] 2%|▏ | 11/520 [00:53<34:11, 4.03s/it] {'loss': 14.5346, 'grad_norm': 0.04013871799390144, 'learning_rate': 3.4375, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:53<34:11, 4.03s/it] 2%|▏ | 12/520 [00:57<33:09, 3.92s/it] {'loss': 13.1297, 'grad_norm': 0.019481356450099633, 'learning_rate': 3.75, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:57<33:09, 3.92s/it][2025-10-10 07:33:52,240] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
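The stage3.py warning above ends with an actionable suggestion: flush the allocator cache on all ranks at the same step. A minimal sketch of that mitigation, assuming a generic DeepSpeed training loop (`engine`, `loader`, and the flush interval are illustrative placeholders, not taken from this run):

```python
from deepspeed.accelerator import get_accelerator

FLUSH_EVERY = 100  # illustrative interval; tune to how often the warning fires

for step, batch in enumerate(loader):  # `loader`: placeholder DataLoader
    loss = engine(batch)               # `engine`: placeholder DeepSpeed engine returning the loss
    engine.backward(loss)
    engine.step()
    if step % FLUSH_EVERY == 0:
        # Every rank reaches this at the same step, so caches are flushed
        # together instead of one rank stalling mid-collective.
        get_accelerator().empty_cache()
```

As the warning notes, this only hides the symptom; reducing memory consumption (e.g. a smaller micro-batch) removes the pressure that triggers the flushes.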
+ 2%|▎ | 13/520 [01:01<34:10, 4.04s/it] {'loss': 11.7531, 'grad_norm': 0.016617576626993237, 'learning_rate': 4.0625, 'epoch': 0.03}
+ 2%|▎ | 13/520 [01:01<34:10, 4.04s/it] 3%|▎ | 14/520 [01:05<33:07, 3.93s/it] {'loss': 11.3458, 'grad_norm': 0.011415024038314483, 'learning_rate': 4.375, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:05<33:07, 3.93s/it] 3%|▎ | 15/520 [01:08<32:27, 3.86s/it] {'loss': 11.5494, 'grad_norm': 0.005002568986861104, 'learning_rate': 4.6875, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:08<32:27, 3.86s/it] 3%|▎ | 16/520 [01:12<31:57, 3.80s/it] {'loss': 11.3934, 'grad_norm': 0.0022662046753397667, 'learning_rate': 5.0, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:12<31:57, 3.80s/it] 3%|▎ | 17/520 [01:16<31:32, 3.76s/it] {'loss': 10.5108, 'grad_norm': 0.005261301065513411, 'learning_rate': 4.999951432328845, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:16<31:32, 3.76s/it] 3%|▎ | 18/520 [01:19<31:14, 3.73s/it] {'loss': 9.9462, 'grad_norm': 0.0013446612600366292, 'learning_rate': 4.999805731202437, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:19<31:14, 3.73s/it] 4%|▎ | 19/520 [01:23<31:02, 3.72s/it] {'loss': 11.0228, 'grad_norm': 0.0008932732530198173, 'learning_rate': 4.999562902281866, 'epoch': 0.04}
+ 4%|▎ | 19/520 [01:23<31:02, 3.72s/it] 4%|▍ | 20/520 [01:27<30:57, 3.71s/it] {'loss': 9.8375, 'grad_norm': 0.0009739592386576836, 'learning_rate': 4.999222955002041, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:27<30:57, 3.71s/it] 4%|▍ | 21/520 [01:31<30:46, 3.70s/it] {'loss': 10.8971, 'grad_norm': 0.0007962619406639934, 'learning_rate': 4.998785902571319, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:31<30:46, 3.70s/it] 4%|▍ | 22/520 [01:34<30:39, 3.69s/it] {'loss': 10.0887, 'grad_norm': 0.0009519520955998987, 'learning_rate': 4.998251761970996, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:34<30:39, 3.69s/it] 4%|▍ | 23/520 [01:38<31:00, 3.74s/it] {'loss': 9.8962, 'grad_norm': 0.0007849147167655599, 'learning_rate': 4.997620553954645, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:38<31:00, 3.74s/it] 5%|▍ | 24/520 [01:42<31:17, 3.79s/it] {'loss': 10.6621, 'grad_norm': 0.0005495304868750315, 'learning_rate': 4.996892303047305, 'epoch': 0.05}
+ 5%|▍ | 24/520 [01:42<31:17, 3.79s/it] 5%|▍ | 25/520 [01:46<31:34, 3.83s/it] {'loss': 10.0984, 'grad_norm': 0.0006879411291955044, 'learning_rate': 4.996067037544542, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:46<31:34, 3.83s/it] 5%|▌ | 26/520 [01:50<31:43, 3.85s/it] {'loss': 10.071, 'grad_norm': 0.0005623500192777745, 'learning_rate': 4.99514478951133, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:50<31:43, 3.85s/it] 5%|▌ | 27/520 [01:53<31:14, 3.80s/it] {'loss': 9.6699, 'grad_norm': 0.0005009612620891973, 'learning_rate': 4.994125594780822, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:53<31:14, 3.80s/it] 5%|▌ | 28/520 [01:57<30:57, 3.78s/it] {'loss': 9.626, 'grad_norm': 0.0005833860727659698, 'learning_rate': 4.99300949295295, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:57<30:57, 3.78s/it] 6%|▌ | 29/520 [02:01<30:38, 3.74s/it] {'loss': 9.4832, 'grad_norm': 0.0006947217904509704, 'learning_rate': 4.9917965273928875, 'epoch': 0.06}
+ 6%|▌ | 29/520 [02:01<30:38, 3.74s/it] 6%|▌ | 30/520 [02:05<30:37, 3.75s/it] {'loss': 10.6055, 'grad_norm': 0.0003750300302423547, 'learning_rate': 4.990486745229364, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:05<30:37, 3.75s/it] 6%|▌ | 31/520 [02:08<30:25, 3.73s/it] {'loss': 9.3011, 'grad_norm': 0.00048140766841390305, 'learning_rate': 4.989080197352834, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:08<30:25, 3.73s/it] 6%|▌ | 32/520 [02:12<30:14, 3.72s/it] {'loss': 11.5317, 'grad_norm': 0.0004117265618604435, 'learning_rate': 4.987576938413504, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:12<30:14, 3.72s/it] 6%|▋ | 33/520 [02:16<30:01, 3.70s/it] {'loss': 9.8484, 'grad_norm': 0.00036118542534329556, 'learning_rate': 4.985977026819199, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:16<30:01, 3.70s/it] 7%|▋ | 34/520 [02:19<29:47, 3.68s/it] {'loss': 9.548, 'grad_norm': 0.00048522673550612595, 'learning_rate': 4.984280524733107, 'epoch': 0.07}
+ 7%|▋ | 34/520 [02:19<29:47, 3.68s/it] 7%|▋ | 35/520 [02:23<29:40, 3.67s/it] {'loss': 9.6926, 'grad_norm': 0.00048561799197089473, 'learning_rate': 4.9824874980713485, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:23<29:40, 3.67s/it] 7%|▋ | 36/520 [02:27<29:41, 3.68s/it] {'loss': 10.2371, 'grad_norm': 0.00037377774866260505, 'learning_rate': 4.98059801650043, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:27<29:41, 3.68s/it] 7%|▋ | 37/520 [02:30<29:35, 3.68s/it] {'loss': 10.4844, 'grad_norm': 0.00034997164729929, 'learning_rate': 4.9786121534345265, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:30<29:35, 3.68s/it] 7%|▋ | 38/520 [02:34<29:32, 3.68s/it] {'loss': 10.2193, 'grad_norm': 0.0003813568757169779, 'learning_rate': 4.976529986032632, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:34<29:32, 3.68s/it] 8%|▊ | 39/520 [02:38<29:26, 3.67s/it] {'loss': 9.5787, 'grad_norm': 0.0003028657182995379, 'learning_rate': 4.974351595195565, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:38<29:26, 3.67s/it] 8%|▊ | 40/520 [02:41<29:16, 3.66s/it] {'loss': 9.7107, 'grad_norm': 0.0003012960128771908, 'learning_rate': 4.9720770655628215, 'epoch': 0.08}
+ 8%|▊ | 40/520 [02:41<29:16, 3.66s/it] 8%|▊ | 41/520 [02:45<29:10, 3.66s/it] {'loss': 9.7759, 'grad_norm': 0.0003367441085404817, 'learning_rate': 4.9697064855092865, 'epoch': 0.08}
+ 8%|▊ | 41/520 [02:45<29:10, 3.66s/it] 8%|▊ | 42/520 [02:49<29:04, 3.65s/it] {'loss': 10.2185, 'grad_norm': 0.00035834173187060256, 'learning_rate': 4.9672399471418025, 'epoch': 0.08}
+ 8%|▊ | 42/520 [02:49<29:04, 3.65s/it] 8%|▊ | 43/520 [02:52<29:02, 3.65s/it] {'loss': 10.7335, 'grad_norm': 0.00043760651952857066, 'learning_rate': 4.964677546295589, 'epoch': 0.08}
+ 8%|▊ | 43/520 [02:52<29:02, 3.65s/it] 8%|▊ | 44/520 [02:56<28:58, 3.65s/it] {'loss': 10.7469, 'grad_norm': 0.00025232016501976857, 'learning_rate': 4.962019382530521, 'epoch': 0.08}
+ 8%|▊ | 44/520 [02:56<28:58, 3.65s/it] 9%|▊ | 45/520 [03:00<29:01, 3.67s/it] {'loss': 9.4534, 'grad_norm': 0.0003177625639987389, 'learning_rate': 4.959265559127253, 'epoch': 0.09}
+ 9%|▊ | 45/520 [03:00<29:01, 3.67s/it] 9%|▉ | 46/520 [03:03<28:55, 3.66s/it] {'loss': 11.3117, 'grad_norm': 0.0002561572545136305, 'learning_rate': 4.9564161830832205, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:03<28:55, 3.66s/it] 9%|▉ | 47/520 [03:07<28:57, 3.67s/it] {'loss': 9.8645, 'grad_norm': 0.00021407405367578557, 'learning_rate': 4.953471365108469, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:07<28:57, 3.67s/it] 9%|▉ | 48/520 [03:11<28:49, 3.67s/it] {'loss': 9.6064, 'grad_norm': 0.0002306531072441522, 'learning_rate': 4.950431219621359, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:11<28:49, 3.67s/it] 9%|▉ | 49/520 [03:14<28:50, 3.67s/it] {'loss': 9.6323, 'grad_norm': 0.000253921793217252, 'learning_rate': 4.947295864744121, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:14<28:50, 3.67s/it] 10%|▉ | 50/520 [03:18<28:45, 3.67s/it] {'loss': 9.576, 'grad_norm': 0.0002474931060139614, 'learning_rate': 4.944065422298261, 'epoch': 0.1}
+ 10%|▉ | 50/520 [03:18<28:45, 3.67s/it] 10%|▉ | 51/520 [03:22<28:38, 3.66s/it] {'loss': 9.2818, 'grad_norm': 0.0002788386450939769, 'learning_rate': 4.9407400177998335, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:22<28:38, 3.66s/it] 10%|█ | 52/520 [03:25<28:39, 3.67s/it] {'loss': 10.0282, 'grad_norm': 0.00022864443061443817, 'learning_rate': 4.937319780454558, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:25<28:39, 3.67s/it] 10%|█ | 53/520 [03:29<28:34, 3.67s/it] {'loss': 9.9123, 'grad_norm': 0.0002522084226518836, 'learning_rate': 4.933804843152808, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:29<28:34, 3.67s/it] 10%|█ | 54/520 [03:33<28:25, 3.66s/it] {'loss': 9.2381, 'grad_norm': 0.00026530182161921446, 'learning_rate': 4.930195342464437, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:33<28:25, 3.66s/it] 11%|█ | 55/520 [03:36<28:19, 3.66s/it] {'loss': 9.5782, 'grad_norm': 0.0003659142778341966, 'learning_rate': 4.926491418633478, 'epoch': 0.11}
+ 11%|█ | 55/520 [03:36<28:19, 3.66s/it] 11%|█ | 56/520 [03:40<28:18, 3.66s/it] {'loss': 9.9148, 'grad_norm': 0.000368489366187492, 'learning_rate': 4.922693215572695, 'epoch': 0.11}
+ 11%|█ | 56/520 [03:40<28:18, 3.66s/it] 11%|█ | 57/520 [03:44<28:12, 3.65s/it] {'loss': 9.4414, 'grad_norm': 0.0002422170285645259, 'learning_rate': 4.918800880857991, 'epoch': 0.11}
+ 11%|█ | 57/520 [03:44<28:12, 3.65s/it] 11%|█ | 58/520 [03:47<28:10, 3.66s/it] {'loss': 9.7084, 'grad_norm': 0.00021680181085998563, 'learning_rate': 4.91481456572267, 'epoch': 0.11}
+ 11%|█ | 58/520 [03:47<28:10, 3.66s/it] 11%|█▏ | 59/520 [03:51<28:07, 3.66s/it] {'loss': 10.2897, 'grad_norm': 0.00019402080800765359, 'learning_rate': 4.91073442505157, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [03:51<28:07, 3.66s/it] 12%|█▏ | 60/520 [03:55<28:17, 3.69s/it] {'loss': 9.9188, 'grad_norm': 0.00016299532512117302, 'learning_rate': 4.90656061737503, 'epoch': 0.12}
+ 12%|█▏ | 60/520 [03:55<28:17, 3.69s/it] 12%|█▏ | 61/520 [03:58<28:37, 3.74s/it] {'loss': 10.8586, 'grad_norm': 0.00016208539774192947, 'learning_rate': 4.9022933048627495, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [03:58<28:37, 3.74s/it] 12%|█▏ | 62/520 [04:02<28:47, 3.77s/it] {'loss': 9.9105, 'grad_norm': 0.0001774352508148728, 'learning_rate': 4.897932653317469, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:02<28:47, 3.77s/it] 12%|█▏ | 63/520 [04:06<28:47, 3.78s/it] {'loss': 9.528, 'grad_norm': 0.00017750107141785714, 'learning_rate': 4.893478832168546, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:06<28:47, 3.78s/it] 12%|█▏ | 64/520 [04:10<28:21, 3.73s/it] {'loss': 9.503, 'grad_norm': 0.00018917861616025533, 'learning_rate': 4.888932014465352, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:10<28:21, 3.73s/it] 12%|█▎ | 65/520 [04:13<28:08, 3.71s/it] {'loss': 9.9325, 'grad_norm': 0.0001928929163569368, 'learning_rate': 4.884292376870567, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:13<28:08, 3.71s/it] 13%|█▎ | 66/520 [04:17<28:00, 3.70s/it] {'loss': 9.9275, 'grad_norm': 0.00016030489347853115, 'learning_rate': 4.879560099653306, 'epoch': 0.13}
+ 13%|█▎ | 66/520 [04:17<28:00, 3.70s/it] 13%|█▎ | 67/520 [04:21<27:45, 3.68s/it] {'loss': 9.4336, 'grad_norm': 0.0001641119363490267, 'learning_rate': 4.874735366682115, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:21<27:45, 3.68s/it] 13%|█▎ | 68/520 [04:24<27:38, 3.67s/it] {'loss': 9.1488, 'grad_norm': 0.00017119607424642008, 'learning_rate': 4.86981836541783, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:24<27:38, 3.67s/it] 13%|█▎ | 69/520 [04:28<27:30, 3.66s/it] {'loss': 9.2385, 'grad_norm': 0.00015325755867302905, 'learning_rate': 4.86480928690629, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:28<27:30, 3.66s/it] 13%|█▎ | 70/520 [04:32<27:24, 3.66s/it] {'loss': 9.4749, 'grad_norm': 0.0001406068398855722, 'learning_rate': 4.859708325770919, 'epoch': 0.13}
+ 13%|█▎ | 70/520 [04:32<27:24, 3.66s/it] 14%|█▎ | 71/520 [04:35<27:24, 3.66s/it] {'loss': 9.1923, 'grad_norm': 0.00016900928056982962, 'learning_rate': 4.854515680205159, 'epoch': 0.14}
+ 14%|█▎ | 71/520 [04:35<27:24, 3.66s/it] 14%|█▍ | 72/520 [04:39<27:23, 3.67s/it] {'loss': 9.6545, 'grad_norm': 0.00015982225990010602, 'learning_rate': 4.849231551964771, 'epoch': 0.14}
+ 14%|█▍ | 72/520 [04:39<27:23, 3.67s/it] 14%|█▍ | 73/520 [04:43<27:17, 3.66s/it] {'loss': 9.1543, 'grad_norm': 0.00017007359996530023, 'learning_rate': 4.8438561463599985, 'epoch': 0.14}
+ 14%|█▍ | 73/520 [04:43<27:17, 3.66s/it] 14%|█▍ | 74/520 [04:46<27:16, 3.67s/it] {'loss': 9.7744, 'grad_norm': 0.0001370918494701439, 'learning_rate': 4.838389672247585, 'epoch': 0.14}
+ 14%|█▍ | 74/520 [04:46<27:16, 3.67s/it] 14%|█▍ | 75/520 [04:50<27:14, 3.67s/it] {'loss': 8.8451, 'grad_norm': 0.00020351653913829057, 'learning_rate': 4.832832342022666, 'epoch': 0.14}
+ 14%|█▍ | 75/520 [04:50<27:14, 3.67s/it] 15%|█▍ | 76/520 [04:54<27:19, 3.69s/it] {'loss': 10.7733, 'grad_norm': 0.00015811722468138495, 'learning_rate': 4.82718437161051, 'epoch': 0.15}
+ 15%|█▍ | 76/520 [04:54<27:19, 3.69s/it] 15%|█▍ | 77/520 [04:57<27:15, 3.69s/it] {'loss': 9.3652, 'grad_norm': 0.00025785427270194226, 'learning_rate': 4.821445980458134, 'epoch': 0.15}
+ 15%|█▍ | 77/520 [04:57<27:15, 3.69s/it] 15%|█▌ | 78/520 [05:01<27:11, 3.69s/it] {'loss': 9.1086, 'grad_norm': 0.0002153069214123274, 'learning_rate': 4.815617391525771, 'epoch': 0.15}
+ 15%|█▌ | 78/520 [05:01<27:11, 3.69s/it] 15%|█▌ | 79/520 [05:05<27:15, 3.71s/it] {'loss': 9.4666, 'grad_norm': 0.00021115119308562211, 'learning_rate': 4.809698831278217, 'epoch': 0.15}
+ 15%|█▌ | 79/520 [05:05<27:15, 3.71s/it] 15%|█▌ | 80/520 [05:09<27:26, 3.74s/it] {'loss': 11.2609, 'grad_norm': 0.0002707872488031616, 'learning_rate': 4.803690529676019, 'epoch': 0.15}
+ 15%|█▌ | 80/520 [05:09<27:26, 3.74s/it] 16%|█▌ | 81/520 [05:12<27:12, 3.72s/it] {'loss': 10.3198, 'grad_norm': 0.00020727585587955425, 'learning_rate': 4.797592720166551, 'epoch': 0.16}
+ 16%|█▌ | 81/520 [05:12<27:12, 3.72s/it] 16%|█▌ | 82/520 [05:16<27:05, 3.71s/it] {'loss': 9.5217, 'grad_norm': 0.00013570035216147852, 'learning_rate': 4.791405639674941, 'epoch': 0.16}
+ 16%|█▌ | 82/520 [05:16<27:05, 3.71s/it] 16%|█▌ | 83/520 [05:20<27:00, 3.71s/it] {'loss': 9.8113, 'grad_norm': 0.00011252670358594406, 'learning_rate': 4.785129528594858, 'epoch': 0.16}
+ 16%|█▌ | 83/520 [05:20<27:00, 3.71s/it] 16%|█▌ | 84/520 [05:23<26:56, 3.71s/it] {'loss': 9.7333, 'grad_norm': 0.00010528253949688765, 'learning_rate': 4.778764630779183, 'epoch': 0.16}
+ 16%|█▌ | 84/520 [05:23<26:56, 3.71s/it] 16%|█▋ | 85/520 [05:27<26:54, 3.71s/it] {'loss': 9.5987, 'grad_norm': 0.00012238054912858554, 'learning_rate': 4.772311193530527, 'epoch': 0.16}
+ 16%|█▋ | 85/520 [05:27<26:54, 3.71s/it] 17%|█▋ | 86/520 [05:31<26:47, 3.70s/it] {'loss': 10.1608, 'grad_norm': 0.00014013340981989484, 'learning_rate': 4.765769467591625, 'epoch': 0.17}
+ 17%|█▋ | 86/520 [05:31<26:47, 3.70s/it] 17%|█▋ | 87/520 [05:35<26:49, 3.72s/it] {'loss': 10.7996, 'grad_norm': 0.00014776940178851767, 'learning_rate': 4.759139707135592, 'epoch': 0.17}
+ 17%|█▋ | 87/520 [05:35<26:49, 3.72s/it] 17%|█▋ | 88/520 [05:38<26:46, 3.72s/it] {'loss': 11.3147, 'grad_norm': 0.00013158502824418928, 'learning_rate': 4.752422169756048, 'epoch': 0.17}
+ 17%|█▋ | 88/520 [05:38<26:46, 3.72s/it] 17%|█▋ | 89/520 [05:42<26:38, 3.71s/it] {'loss': 9.6781, 'grad_norm': 0.00011526304485547349, 'learning_rate': 4.74561711645711, 'epoch': 0.17}
+ 17%|█▋ | 89/520 [05:42<26:38, 3.71s/it] 17%|█▋ | 90/520 [05:46<26:30, 3.70s/it] {'loss': 9.3661, 'grad_norm': 0.0001435463543335388, 'learning_rate': 4.7387248116432525, 'epoch': 0.17}
+ 17%|█▋ | 90/520 [05:46<26:30, 3.70s/it] 18%|█▊ | 91/520 [05:49<26:19, 3.68s/it] {'loss': 9.7438, 'grad_norm': 0.00011111115168079317, 'learning_rate': 4.731745523109029, 'epoch': 0.17}
+ 18%|█▊ | 91/520 [05:49<26:19, 3.68s/it] 18%|█▊ | 92/520 [05:53<26:25, 3.70s/it] {'loss': 9.3693, 'grad_norm': 0.00015120340655302926, 'learning_rate': 4.724679522028672, 'epoch': 0.18}
+ 18%|█▊ | 92/520 [05:53<26:25, 3.70s/it] 18%|█▊ | 93/520 [05:57<26:17, 3.69s/it] {'loss': 9.504, 'grad_norm': 0.00010831824238139634, 'learning_rate': 4.717527082945554, 'epoch': 0.18}
+ 18%|█▊ | 93/520 [05:57<26:17, 3.69s/it] 18%|█▊ | 94/520 [06:00<26:12, 3.69s/it] {'loss': 9.9729, 'grad_norm': 9.872258682658706e-05, 'learning_rate': 4.710288483761524, 'epoch': 0.18}
+ 18%|█▊ | 94/520 [06:00<26:12, 3.69s/it] 18%|█▊ | 95/520 [06:04<26:14, 3.70s/it] {'loss': 9.643, 'grad_norm': 0.0001277993448856244, 'learning_rate': 4.7029640057261055, 'epoch': 0.18}
+ 18%|█▊ | 95/520 [06:04<26:14, 3.70s/it] 18%|█▊ | 96/520 [06:08<26:05, 3.69s/it] {'loss': 9.1206, 'grad_norm': 0.00017984138116573485, 'learning_rate': 4.695553933425572, 'epoch': 0.18}
+ 18%|█▊ | 96/520 [06:08<26:05, 3.69s/it] 19%|█▊ | 97/520 [06:12<25:59, 3.69s/it] {'loss': 9.6583, 'grad_norm': 0.00017347722039538438, 'learning_rate': 4.688058554771884, 'epoch': 0.19}
+ 19%|█▊ | 97/520 [06:12<25:59, 3.69s/it] 19%|█▉ | 98/520 [06:15<26:09, 3.72s/it] {'loss': 9.0208, 'grad_norm': 0.00021585041164780744, 'learning_rate': 4.680478160991513, 'epoch': 0.19}
+ 19%|█▉ | 98/520 [06:15<26:09, 3.72s/it] 19%|█▉ | 99/520 [06:19<26:16, 3.75s/it] {'loss': 9.7989, 'grad_norm': 0.00010983825510770134, 'learning_rate': 4.672813046614116, 'epoch': 0.19}
+ 19%|█▉ | 99/520 [06:19<26:16, 3.75s/it] 19%|█▉ | 100/520 [06:23<26:25, 3.78s/it] {'loss': 10.6143, 'grad_norm': 0.00011265003370080137, 'learning_rate': 4.665063509461097, 'epoch': 0.19}
+ 19%|█▉ | 100/520 [06:23<26:25, 3.78s/it] 19%|█▉ | 101/520 [06:27<26:28, 3.79s/it] {'loss': 9.4461, 'grad_norm': 0.00014097465920111962, 'learning_rate': 4.657229850634033, 'epoch': 0.19}
+ 19%|█▉ | 101/520 [06:27<26:28, 3.79s/it] 20%|█▉ | 102/520 [06:31<26:29, 3.80s/it] {'loss': 9.5076, 'grad_norm': 0.00011969357932293821, 'learning_rate': 4.649312374502975, 'epoch': 0.2}
+ 20%|█▉ | 102/520 [06:31<26:29, 3.80s/it] 20%|█▉ | 103/520 [06:34<26:26, 3.80s/it] {'loss': 8.7938, 'grad_norm': 0.00023834616749687307, 'learning_rate': 4.6413113886946284, 'epoch': 0.2}
+ 20%|█▉ | 103/520 [06:34<26:26, 3.80s/it] 20%|██ | 104/520 [06:38<26:23, 3.81s/it] {'loss': 9.7715, 'grad_norm': 8.705192893658956e-05, 'learning_rate': 4.633227204080389, 'epoch': 0.2}
+ 20%|██ | 104/520 [06:38<26:23, 3.81s/it] 20%|██ | 105/520 [06:42<26:21, 3.81s/it] {'loss': 9.4699, 'grad_norm': 0.00020264997470210462, 'learning_rate': 4.625060134764273, 'epoch': 0.2}
+ 20%|██ | 105/520 [06:42<26:21, 3.81s/it] 20%|██ | 106/520 [06:46<26:21, 3.82s/it] {'loss': 10.4476, 'grad_norm': 0.00014598571952289678, 'learning_rate': 4.61681049807071, 'epoch': 0.2}
+ 20%|██ | 106/520 [06:46<26:21, 3.82s/it] 21%|██ | 107/520 [06:50<26:13, 3.81s/it] {'loss': 10.4737, 'grad_norm': 0.00018707480700855624, 'learning_rate': 4.608478614532214, 'epoch': 0.21}
+ 21%|██ | 107/520 [06:50<26:13, 3.81s/it] 21%|██ | 108/520 [06:54<26:14, 3.82s/it] {'loss': 9.4955, 'grad_norm': 0.00015842306023413415, 'learning_rate': 4.6000648078769295, 'epoch': 0.21}
+ 21%|██ | 108/520 [06:54<26:14, 3.82s/it] 21%|██ | 109/520 [06:57<26:10, 3.82s/it] {'loss': 10.6363, 'grad_norm': 0.0001213235566820375, 'learning_rate': 4.591569405016049, 'epoch': 0.21}
+ 21%|██ | 109/520 [06:57<26:10, 3.82s/it] 21%|██ | 110/520 [07:01<26:04, 3.81s/it] {'loss': 10.0776, 'grad_norm': 0.0003057599566906018, 'learning_rate': 4.582992736031122, 'epoch': 0.21}
+ 21%|██ | 110/520 [07:01<26:04, 3.81s/it] 21%|██▏ | 111/520 [07:05<26:16, 3.85s/it] {'loss': 10.0663, 'grad_norm': 0.0001703225191102194, 'learning_rate': 4.574335134161219, 'epoch': 0.21}
+ 21%|██▏ | 111/520 [07:05<26:16, 3.85s/it] 22%|██▏ | 112/520 [07:09<26:08, 3.85s/it] {'loss': 9.9964, 'grad_norm': 0.00023434701608080963, 'learning_rate': 4.565596935789987, 'epoch': 0.22}
+ 22%|██▏ | 112/520 [07:09<26:08, 3.85s/it] 22%|██▏ | 113/520 [07:13<26:00, 3.83s/it] {'loss': 9.1146, 'grad_norm': 0.0002957066112207668, 'learning_rate': 4.556778480432583, 'epoch': 0.22}
+ 22%|██▏ | 113/520 [07:13<26:00, 3.83s/it] 22%|██▏ | 114/520 [07:17<25:50, 3.82s/it] {'loss': 10.0861, 'grad_norm': 0.00020848431379290935, 'learning_rate': 4.547880110722479, 'epoch': 0.22}
+ 22%|██▏ | 114/520 [07:17<25:50, 3.82s/it] 22%|██▏ | 115/520 [07:20<25:42, 3.81s/it] {'loss': 10.4843, 'grad_norm': 0.0004279684129026708, 'learning_rate': 4.53890217239815, 'epoch': 0.22}
+ 22%|██▏ | 115/520 [07:20<25:42, 3.81s/it] 22%|██▏ | 116/520 [07:24<25:38, 3.81s/it] {'loss': 9.8973, 'grad_norm': 0.00015534317153595269, 'learning_rate': 4.529845014289641, 'epoch': 0.22}
+ 22%|██▏ | 116/520 [07:24<25:38, 3.81s/it] 22%|██▎ | 117/520 [07:28<25:33, 3.81s/it] {'loss': 9.8939, 'grad_norm': 9.120359323843746e-05, 'learning_rate': 4.520708988305014, 'epoch': 0.23}
+ 22%|██▎ | 117/520 [07:28<25:33, 3.81s/it] 23%|██▎ | 118/520 [07:32<25:25, 3.79s/it] {'loss': 9.2732, 'grad_norm': 0.0001255896301086903, 'learning_rate': 4.511494449416671, 'epoch': 0.23}
+ 23%|██▎ | 118/520 [07:32<25:25, 3.79s/it] 23%|██▎ | 119/520 [07:36<25:32, 3.82s/it] {'loss': 8.9858, 'grad_norm': 0.0001409406110669847, 'learning_rate': 4.502201755647571, 'epoch': 0.23}
+ 23%|██▎ | 119/520 [07:36<25:32, 3.82s/it] 23%|██▎ | 120/520 [07:39<25:25, 3.81s/it] {'loss': 9.4059, 'grad_norm': 0.00013959830788161305, 'learning_rate': 4.492831268057306, 'epoch': 0.23}
+ 23%|██▎ | 120/520 [07:39<25:25, 3.81s/it] 23%|██▎ | 121/520 [07:43<25:22, 3.82s/it] {'loss': 9.1386, 'grad_norm': 0.00011412734404521219, 'learning_rate': 4.483383350728088, 'epoch': 0.23}
+ 23%|██▎ | 121/520 [07:43<25:22, 3.82s/it] 23%|██▎ | 122/520 [07:47<25:19, 3.82s/it] {'loss': 9.0803, 'grad_norm': 0.00011631983426786681, 'learning_rate': 4.473858370750588, 'epoch': 0.23}
+ 23%|██▎ | 122/520 [07:47<25:19, 3.82s/it] 24%|██▎ | 123/520 [07:51<25:07, 3.80s/it] {'loss': 10.5921, 'grad_norm': 0.00010055188975656146, 'learning_rate': 4.4642566982096845, 'epoch': 0.24}
+ 24%|██▎ | 123/520 [07:51<25:07, 3.80s/it] 24%|██▍ | 124/520 [07:55<25:04, 3.80s/it] {'loss': 10.0445, 'grad_norm': 0.00012959383603221197, 'learning_rate': 4.454578706170075, 'epoch': 0.24}
+ 24%|██▍ | 124/520 [07:55<25:04, 3.80s/it] 24%|██▍ | 125/520 [07:58<25:02, 3.80s/it] {'loss': 9.5449, 'grad_norm': 0.00010557838674264934, 'learning_rate': 4.444824770661787, 'epoch': 0.24}
+ 24%|██▍ | 125/520 [07:58<25:02, 3.80s/it] 24%|██▍ | 126/520 [08:03<26:15, 4.00s/it] {'loss': 9.7383, 'grad_norm': 0.00015857009899820028, 'learning_rate': 4.434995270665569, 'epoch': 0.24}
+ 24%|██▍ | 126/520 [08:03<26:15, 4.00s/it] 24%|██▍ | 127/520 [08:07<25:48, 3.94s/it] {'loss': 9.8128, 'grad_norm': 0.0001762662147763061, 'learning_rate': 4.425090588098158, 'epoch': 0.24}
+ 24%|██▍ | 127/520 [08:07<25:48, 3.94s/it] 25%|██▍ | 128/520 [08:10<25:27, 3.90s/it] {'loss': 9.6064, 'grad_norm': 0.000155371106421216, 'learning_rate': 4.415111107797445, 'epoch': 0.25}
+ 25%|██▍ | 128/520 [08:10<25:27, 3.90s/it] 25%|██▍ | 129/520 [08:14<25:16, 3.88s/it] {'loss': 8.9405, 'grad_norm': 0.00012879905235004435, 'learning_rate': 4.405057217507527, 'epoch': 0.25}
+ 25%|██▍ | 129/520 [08:14<25:16, 3.88s/it] 25%|██▌ | 130/520 [08:18<25:06, 3.86s/it] {'loss': 9.9088, 'grad_norm': 0.0001193247749974103, 'learning_rate': 4.394929307863633, 'epoch': 0.25}
+ 25%|██▌ | 130/520 [08:18<25:06, 3.86s/it] 25%|██▌ | 131/520 [08:22<24:57, 3.85s/it] {'loss': 10.3236, 'grad_norm': 0.00012770960477350948, 'learning_rate': 4.38472777237695, 'epoch': 0.25}
+ 25%|██▌ | 131/520 [08:22<24:57, 3.85s/it] 25%|██▌ | 132/520 [08:26<24:50, 3.84s/it] {'loss': 9.7841, 'grad_norm': 0.00011019528090855855, 'learning_rate': 4.374453007419335, 'epoch': 0.25}
+ 25%|██▌ | 132/520 [08:26<24:50, 3.84s/it] 26%|██▌ | 133/520 [08:30<24:44, 3.84s/it] {'loss': 9.6489, 'grad_norm': 0.0001249846499461722, 'learning_rate': 4.3641054122079135, 'epoch': 0.26}
+ 26%|██▌ | 133/520 [08:30<24:44, 3.84s/it] 26%|██▌ | 134/520 [08:33<24:40, 3.83s/it] {'loss': 9.5683, 'grad_norm': 0.00010047102151961074, 'learning_rate': 4.353685388789566, 'epoch': 0.26}
+ 26%|██▌ | 134/520 [08:33<24:40, 3.83s/it] 26%|██▌ | 135/520 [08:37<24:36, 3.83s/it] {'loss': 10.0368, 'grad_norm': 8.69176547570106e-05, 'learning_rate': 4.34319334202531, 'epoch': 0.26}
+ 26%|██▌ | 135/520 [08:37<24:36, 3.83s/it] 26%|██▌ | 136/520 [08:41<24:27, 3.82s/it] {'loss': 9.3518, 'grad_norm': 8.58934623720891e-05, 'learning_rate': 4.332629679574565, 'epoch': 0.26}
+ 26%|██▌ | 136/520 [08:41<24:27, 3.82s/it] 26%|██▋ | 137/520 [08:45<24:24, 3.82s/it] {'loss': 9.6254, 'grad_norm': 7.730650792053916e-05, 'learning_rate': 4.321994811879321, 'epoch': 0.26}
+ 26%|██▋ | 137/520 [08:45<24:24, 3.82s/it] 27%|██▋ | 138/520 [08:49<24:21, 3.83s/it] {'loss': 9.3146, 'grad_norm': 9.23652494860408e-05, 'learning_rate': 4.3112891521481815, 'epoch': 0.27}
+ 27%|██▋ | 138/520 [08:49<24:21, 3.83s/it] 27%|██▋ | 139/520 [08:53<24:19, 3.83s/it] {'loss': 9.8441, 'grad_norm': 0.00017144928675033689, 'learning_rate': 4.3005131163403165, 'epoch': 0.27}
+ 27%|██▋ | 139/520 [08:53<24:19, 3.83s/it] 27%|██▋ | 140/520 [08:56<24:21, 3.85s/it] {'loss': 10.5011, 'grad_norm': 0.00010313662441326606, 'learning_rate': 4.289667123149296, 'epoch': 0.27}
+ 27%|██▋ | 140/520 [08:56<24:21, 3.85s/it] 27%|██▋ | 141/520 [09:00<24:14, 3.84s/it] {'loss': 9.709, 'grad_norm': 8.227759310987518e-05, 'learning_rate': 4.278751593986826, 'epoch': 0.27}
+ 27%|██▋ | 141/520 [09:00<24:14, 3.84s/it] 27%|██▋ | 142/520 [09:04<24:06, 3.83s/it] {'loss': 10.4504, 'grad_norm': 9.486650571431389e-05, 'learning_rate': 4.267766952966369, 'epoch': 0.27}
+ 27%|██▋ | 142/520 [09:04<24:06, 3.83s/it] 28%|██▊ | 143/520 [09:08<23:59, 3.82s/it] {'loss': 9.6957, 'grad_norm': 8.909422648467854e-05, 'learning_rate': 4.256713626886673, 'epoch': 0.28}
+ 28%|██▊ | 143/520 [09:08<23:59, 3.82s/it] 28%|██▊ | 144/520 [09:12<23:53, 3.81s/it] {'loss': 9.0262, 'grad_norm': 0.00011678593930658187, 'learning_rate': 4.245592045215182, 'epoch': 0.28}
+ 28%|██▊ | 144/520 [09:12<23:53, 3.81s/it] 28%|██▊ | 145/520 [09:16<23:57, 3.83s/it] {'loss': 9.2425, 'grad_norm': 0.00011734786759496549, 'learning_rate': 4.234402640071354, 'epoch': 0.28}
+ 28%|██▊ | 145/520 [09:16<23:57, 3.83s/it] 28%|██▊ | 146/520 [09:19<23:53, 3.83s/it] {'loss': 10.6514, 'grad_norm': 7.966962214241458e-05, 'learning_rate': 4.223145846209867, 'epoch': 0.28}
+ 28%|██▊ | 146/520 [09:19<23:53, 3.83s/it] 28%|██▊ | 147/520 [09:23<23:53, 3.84s/it] {'loss': 9.0849, 'grad_norm': 9.749694591094631e-05, 'learning_rate': 4.211822101003734, 'epoch': 0.28}
+ 28%|██▊ | 147/520 [09:23<23:53, 3.84s/it] 28%|██▊ | 148/520 [09:27<23:45, 3.83s/it] {'loss': 9.3465, 'grad_norm': 9.738050192185512e-05, 'learning_rate': 4.200431844427298, 'epoch': 0.28}
+ 28%|██▊ | 148/520 [09:27<23:45, 3.83s/it] 29%|██▊ | 149/520 [09:31<23:24, 3.79s/it] {'loss': 9.4009, 'grad_norm': 7.820141234860104e-05, 'learning_rate': 4.18897551903915, 'epoch': 0.29}
+ 29%|██▊ | 149/520 [09:31<23:24, 3.79s/it] 29%|██▉ | 150/520 [09:34<23:07, 3.75s/it] {'loss': 9.7439, 'grad_norm': 9.997553908986437e-05, 'learning_rate': 4.177453569964925, 'epoch': 0.29}
+ 29%|██▉ | 150/520 [09:34<23:07, 3.75s/it] 29%|██▉ | 151/520 [09:38<22:50, 3.71s/it] {'loss': 9.1931, 'grad_norm': 0.0001024295045247026, 'learning_rate': 4.16586644488001, 'epoch': 0.29}
+ 29%|██▉ | 151/520 [09:38<22:50, 3.71s/it] 29%|██▉ | 152/520 [09:42<22:41, 3.70s/it] {'loss': 9.3628, 'grad_norm': 0.00012642065217578638, 'learning_rate': 4.154214593992148, 'epoch': 0.29}
+ 29%|██▉ | 152/520 [09:42<22:41, 3.70s/it] 29%|██▉ | 153/520 [09:45<22:37, 3.70s/it] {'loss': 9.2104, 'grad_norm': 0.00011713637531856827, 'learning_rate': 4.142498470023951, 'epoch': 0.29}
+ 29%|██▉ | 153/520 [09:45<22:37, 3.70s/it] 30%|██▉ | 154/520 [09:49<22:28, 3.68s/it] {'loss': 9.617, 'grad_norm': 7.852065451971208e-05, 'learning_rate': 4.1307185281953025, 'epoch': 0.3}
+ 30%|██▉ | 154/520 [09:49<22:28, 3.68s/it] 30%|██▉ | 155/520 [09:53<22:22, 3.68s/it] {'loss': 9.7038, 'grad_norm': 9.430790512145212e-05, 'learning_rate': 4.118875226205676, 'epoch': 0.3}
+ 30%|██▉ | 155/520 [09:53<22:22, 3.68s/it] 30%|███ | 156/520 [09:56<22:31, 3.71s/it] {'loss': 9.8054, 'grad_norm': 8.44196297342399e-05, 'learning_rate': 4.106969024216348, 'epoch': 0.3}
+ 30%|███ | 156/520 [09:56<22:31, 3.71s/it] 30%|███ | 157/520 [10:00<22:25, 3.71s/it] {'loss': 10.7188, 'grad_norm': 0.00011355654593470861, 'learning_rate': 4.095000384832522, 'epoch': 0.3}
+ 30%|███ | 157/520 [10:00<22:25, 3.71s/it] 30%|███ | 158/520 [10:04<22:17, 3.70s/it] {'loss': 9.349, 'grad_norm': 9.752361647627756e-05, 'learning_rate': 4.08296977308535, 'epoch': 0.3}
+ 30%|███ | 158/520 [10:04<22:17, 3.70s/it] 31%|███ | 159/520 [10:08<22:13, 3.69s/it] {'loss': 9.2886, 'grad_norm': 0.00012667607815632954, 'learning_rate': 4.0708776564138685, 'epoch': 0.31}
+ 31%|███ | 159/520 [10:08<22:13, 3.69s/it] 31%|███ | 160/520 [10:11<22:02, 3.67s/it] {'loss': 9.3272, 'grad_norm': 0.00014079463262001393, 'learning_rate': 4.058724504646834, 'epoch': 0.31}
+ 31%|███ | 160/520 [10:11<22:02, 3.67s/it] 31%|███ | 161/520 [10:15<21:57, 3.67s/it] {'loss': 9.4815, 'grad_norm': 8.59769895933628e-05, 'learning_rate': 4.04651078998447, 'epoch': 0.31}
+ 31%|███ | 161/520 [10:15<21:57, 3.67s/it] 31%|███ | 162/520 [10:18<21:51, 3.66s/it] {'loss': 10.5536, 'grad_norm': 0.00011454096881675807, 'learning_rate': 4.034236986980119, 'epoch': 0.31}
+ 31%|███ | 162/520 [10:18<21:51, 3.66s/it] 31%|███▏ | 163/520 [10:22<21:48, 3.66s/it] {'loss': 9.1272, 'grad_norm': 0.00018724842701244523, 'learning_rate': 4.0219035725218015, 'epoch': 0.31}
+ 31%|███▏ | 163/520 [10:22<21:48, 3.66s/it] 32%|███▏ | 164/520 [10:26<21:51, 3.68s/it] {'loss': 8.968, 'grad_norm': 0.00022692568365583968, 'learning_rate': 4.009511025813693, 'epoch': 0.32}
+ 32%|███▏ | 164/520 [10:26<21:51, 3.68s/it] 32%|███▏ | 165/520 [10:30<21:56, 3.71s/it] {'loss': 9.3252, 'grad_norm': 0.00012578973817941943, 'learning_rate': 3.997059828357501, 'epoch': 0.32}
+ 32%|███▏ | 165/520 [10:30<21:56, 3.71s/it] 32%|███▏ | 166/520 [10:33<21:55, 3.71s/it] {'loss': 9.2205, 'grad_norm': 0.00016901200210060136, 'learning_rate': 3.9845504639337537, 'epoch': 0.32}
+ 32%|███▏ | 166/520 [10:33<21:55, 3.71s/it] 32%|███▏ | 167/520 [10:37<21:51, 3.71s/it] {'loss': 9.7206, 'grad_norm': 6.578242119701261e-05, 'learning_rate': 3.9719834185830116, 'epoch': 0.32}
+ 32%|███▏ | 167/520 [10:37<21:51, 3.71s/it] 32%|███▏ | 168/520 [10:41<21:39, 3.69s/it] {'loss': 9.247, 'grad_norm': 0.0003145880040399131, 'learning_rate': 3.959359180586975, 'epoch': 0.32}
+ 32%|███▏ | 168/520 [10:41<21:39, 3.69s/it] 32%|███▎ | 169/520 [10:44<21:32, 3.68s/it] {'loss': 9.4916, 'grad_norm': 6.788310642086711e-05, 'learning_rate': 3.946678240449515, 'epoch': 0.33}
+ 32%|███▎ | 169/520 [10:44<21:32, 3.68s/it] 33%|███▎ | 170/520 [10:48<21:25, 3.67s/it] {'loss': 10.1229, 'grad_norm': 0.00013418846701052767, 'learning_rate': 3.933941090877615, 'epoch': 0.33}
+ 33%|███▎ | 170/520 [10:48<21:25, 3.67s/it] 33%|███▎ | 171/520 [10:52<21:21, 3.67s/it] {'loss': 9.0274, 'grad_norm': 0.00014508725426488463, 'learning_rate': 3.921148226762231, 'epoch': 0.33}
+ 33%|███▎ | 171/520 [10:52<21:21, 3.67s/it] 33%|███▎ | 172/520 [10:55<21:15, 3.67s/it] {'loss': 9.4589, 'grad_norm': 7.244107732597629e-05, 'learning_rate': 3.908300145159055, 'epoch': 0.33}
+ 33%|███▎ | 172/520 [10:55<21:15, 3.67s/it] 33%|███▎ | 173/520 [10:59<21:12, 3.67s/it] {'loss': 9.0356, 'grad_norm': 8.66565297818254e-05, 'learning_rate': 3.8953973452692106, 'epoch': 0.33}
+ 33%|███▎ | 173/520 [10:59<21:12, 3.67s/it] 33%|███▎ | 174/520 [11:03<21:07, 3.66s/it] {'loss': 9.6318, 'grad_norm': 7.077964459632354e-05, 'learning_rate': 3.8824403284198485, 'epoch': 0.33}
+ 33%|███▎ | 174/520 [11:03<21:07, 3.66s/it] 34%|███▎ | 175/520 [11:06<21:09, 3.68s/it] {'loss': 9.1232, 'grad_norm': 8.384051927939373e-05, 'learning_rate': 3.8694295980446785, 'epoch': 0.34}
+ 34%|███▎ | 175/520 [11:06<21:09, 3.68s/it] 34%|███▍ | 176/520 [11:10<21:00, 3.67s/it] {'loss': 10.6908, 'grad_norm': 5.8983043273803514e-05, 'learning_rate': 3.8563656596643985, 'epoch': 0.34}
+ 34%|███▍ | 176/520 [11:10<21:00, 3.67s/it] 34%|███▍ | 177/520 [11:14<21:00, 3.68s/it] {'loss': 10.1553, 'grad_norm': 9.24868930351543e-05, 'learning_rate': 3.84324902086706, 'epoch': 0.34}
+ 34%|███▍ | 177/520 [11:14<21:00, 3.68s/it] 34%|███▍ | 178/520 [11:17<20:53, 3.67s/it] {'loss': 9.523, 'grad_norm': 6.537888449760684e-05, 'learning_rate': 3.8300801912883413, 'epoch': 0.34}
+ 34%|███▍ | 178/520 [11:17<20:53, 3.67s/it] 34%|███▍ | 179/520 [11:21<20:47, 3.66s/it] {'loss': 9.3923, 'grad_norm': 7.36470823028055e-05, 'learning_rate': 3.8168596825917516, 'epoch': 0.34}
+ 34%|███▍ | 179/520 [11:21<20:47, 3.66s/it] 35%|███▍ | 180/520 [11:25<20:43, 3.66s/it] {'loss': 9.5571, 'grad_norm': 6.747831679889896e-05, 'learning_rate': 3.8035880084487452, 'epoch': 0.35}
+ 35%|███▍ | 180/520 [11:25<20:43, 3.66s/it] 35%|███▍ | 181/520 [11:28<20:37, 3.65s/it] {'loss': 9.1588, 'grad_norm': 7.481610684432795e-05, 'learning_rate': 3.7902656845187668, 'epoch': 0.35}
+ 35%|███▍ | 181/520 [11:28<20:37, 3.65s/it] 35%|███▌ | 182/520 [11:32<20:31, 3.64s/it] {'loss': 9.5321, 'grad_norm': 6.8435669168887e-05, 'learning_rate': 3.7768932284292145, 'epoch': 0.35}
+ 35%|███▌ | 182/520 [11:32<20:31, 3.64s/it] 35%|███▌ | 183/520 [11:36<20:30, 3.65s/it] {'loss': 9.1839, 'grad_norm': 8.859660982833165e-05, 'learning_rate': 3.763471159755327, 'epoch': 0.35}
+ 35%|███▌ | 183/520 [11:36<20:30, 3.65s/it] 35%|███▌ | 184/520 [11:39<20:27, 3.65s/it] {'loss': 8.9852, 'grad_norm': 7.788012300355882e-05, 'learning_rate': 3.75, 'epoch': 0.35}
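The learning-rate trace in these lines pins down the schedule: it ramps linearly from 0.3125 at step 1 to 5.0 at step 16 (5.0/16 = 0.3125 per step), then decays smoothly, reaching exactly 3.75 at step 184. That is consistent with linear warmup followed by cosine decay, since at step 184 the decay progress is (184 - 16)/(520 - 16) = 1/3 and 5.0 × 0.5 × (1 + cos(π/3)) = 3.75. A small sketch that reproduces the logged values (the formula is inferred from the numbers above, not taken from the training code):

```python
import math

PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 5.0, 16, 520  # inferred from the logged values

def lr_at(step: int) -> float:
    """Linear warmup to PEAK_LR, then cosine decay toward zero."""
    if step <= WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))    # 0.3125, as logged at step 1
print(lr_at(17))   # ~4.999951432, as logged at step 17
print(lr_at(184))  # 3.75, as logged at step 184
```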
+ 35%|███▌ | 184/520 [11:39<20:27, 3.65s/it] 36%|███▌ | 185/520 [11:43<20:24, 3.66s/it] {'loss': 10.0202, 'grad_norm': 6.97116928863356e-05, 'learning_rate': 3.7364802725735187, 'epoch': 0.36}
+ 36%|███▌ | 185/520 [11:43<20:24, 3.66s/it] 36%|███▌ | 186/520 [11:47<20:19, 3.65s/it] {'loss': 9.2595, 'grad_norm': 5.967694681148076e-05, 'learning_rate': 3.7229125027732235, 'epoch': 0.36}
+ 36%|███▌ | 186/520 [11:47<20:19, 3.65s/it] 36%|███▌ | 187/520 [11:50<20:20, 3.66s/it] {'loss': 9.6558, 'grad_norm': 6.224034894451189e-05, 'learning_rate': 3.7092972177630994, 'epoch': 0.36}
+ 36%|███▌ | 187/520 [11:50<20:20, 3.66s/it] 36%|███▌ | 188/520 [11:54<20:30, 3.71s/it] {'loss': 9.2314, 'grad_norm': 6.425431492125833e-05, 'learning_rate': 3.6956349465532954, 'epoch': 0.36}
+ 36%|███▌ | 188/520 [11:54<20:30, 3.71s/it] 36%|███▋ | 189/520 [11:58<20:35, 3.73s/it] {'loss': 9.6911, 'grad_norm': 6.956309698958437e-05, 'learning_rate': 3.6819262199795677, 'epoch': 0.36}
+ 36%|███▋ | 189/520 [11:58<20:35, 3.73s/it] 37%|███▋ | 190/520 [12:01<20:25, 3.71s/it] {'loss': 9.2773, 'grad_norm': 6.062968135021648e-05, 'learning_rate': 3.668171570682655, 'epoch': 0.37}
+ 37%|███▋ | 190/520 [12:01<20:25, 3.71s/it] 37%|███▋ | 191/520 [12:05<20:28, 3.73s/it] {'loss': 9.4343, 'grad_norm': 7.171605211029952e-05, 'learning_rate': 3.6543715330875854, 'epoch': 0.37}
+ 37%|███▋ | 191/520 [12:05<20:28, 3.73s/it] 37%|███▋ | 192/520 [12:09<20:20, 3.72s/it] {'loss': 9.8147, 'grad_norm': 9.235012593041488e-05, 'learning_rate': 3.6405266433829073, 'epoch': 0.37}
+ 37%|███▋ | 192/520 [12:09<20:20, 3.72s/it] 37%|███▋ | 193/520 [12:13<20:09, 3.70s/it] {'loss': 10.3253, 'grad_norm': 7.019879388444816e-05, 'learning_rate': 3.6266374394998637, 'epoch': 0.37}
+ 37%|███▋ | 193/520 [12:13<20:09, 3.70s/it] 37%|███▋ | 194/520 [12:16<20:07, 3.70s/it] {'loss': 9.6191, 'grad_norm': 9.50714366590028e-05, 'learning_rate': 3.6127044610914805, 'epoch': 0.37}
+ 37%|███▋ | 194/520 [12:16<20:07, 3.70s/it] 38%|███▊ | 195/520 [12:20<20:04, 3.71s/it] {'loss': 9.2714, 'grad_norm': 7.63399947886287e-05, 'learning_rate': 3.5987282495116126, 'epoch': 0.38}
+ 38%|███▊ | 195/520 [12:20<20:04, 3.71s/it] 38%|███▊ | 196/520 [12:24<19:55, 3.69s/it] {'loss': 9.5109, 'grad_norm': 7.837683725776848e-05, 'learning_rate': 3.584709347793895, 'epoch': 0.38}
+ 38%|███▊ | 196/520 [12:24<19:55, 3.69s/it] 38%|███▊ | 197/520 [12:27<19:50, 3.69s/it] {'loss': 9.138, 'grad_norm': 6.653338224417159e-05, 'learning_rate': 3.5706483006306566, 'epoch': 0.38}
+ 38%|███▊ | 197/520 [12:27<19:50, 3.69s/it] 38%|███▊ | 198/520 [12:31<19:48, 3.69s/it] {'loss': 9.6854, 'grad_norm': 9.400520856300997e-05, 'learning_rate': 3.5565456543517486, 'epoch': 0.38}
+ 38%|███▊ | 198/520 [12:31<19:48, 3.69s/it] 38%|███▊ | 199/520 [12:35<20:14, 3.78s/it] {'loss': 9.3616, 'grad_norm': 7.913958396037187e-05, 'learning_rate': 3.5424019569033205, 'epoch': 0.38}
+ 38%|███▊ | 199/520 [12:35<20:14, 3.78s/it] 38%|███▊ | 200/520 [12:39<20:24, 3.83s/it] {'loss': 9.9466, 'grad_norm': 7.397216391271309e-05, 'learning_rate': 3.5282177578265292, 'epoch': 0.38}
+ 38%|███▊ | 200/520 [12:39<20:24, 3.83s/it] 39%|███▊ | 201/520 [12:43<20:22, 3.83s/it] {'loss': 9.8021, 'grad_norm': 8.533486284774764e-05, 'learning_rate': 3.513993608236188, 'epoch': 0.39}
+ 39%|███▊ | 201/520 [12:43<20:22, 3.83s/it] 39%|███▉ | 202/520 [12:46<20:01, 3.78s/it] {'loss': 9.4373, 'grad_norm': 5.580296677806071e-05, 'learning_rate': 3.499730060799352, 'epoch': 0.39}
+ 39%|███▉ | 202/520 [12:46<20:01, 3.78s/it] 39%|███▉ | 203/520 [12:50<19:44, 3.74s/it] {'loss': 9.2855, 'grad_norm': 6.467270380256674e-05, 'learning_rate': 3.4854276697138484, 'epoch': 0.39}
+ 39%|███▉ | 203/520 [12:50<19:44, 3.74s/it] 39%|███▉ | 204/520 [12:54<19:32, 3.71s/it] {'loss': 9.84, 'grad_norm': 7.343368463504981e-05, 'learning_rate': 3.4710869906867368, 'epoch': 0.39}
+ 39%|███▉ | 204/520 [12:54<19:32, 3.71s/it] 39%|███▉ | 205/520 [12:58<19:30, 3.72s/it] {'loss': 10.2559, 'grad_norm': 7.405475977385282e-05, 'learning_rate': 3.4567085809127245, 'epoch': 0.39}
+ 39%|███▉ | 205/520 [12:58<19:30, 3.72s/it] 40%|███▉ | 206/520 [13:01<19:18, 3.69s/it] {'loss': 9.8493, 'grad_norm': 8.691924936487803e-05, 'learning_rate': 3.442292999052513, 'epoch': 0.4}
+ 40%|███▉ | 206/520 [13:01<19:18, 3.69s/it] 40%|███▉ | 207/520 [13:05<19:14, 3.69s/it] {'loss': 10.244, 'grad_norm': 0.00010481202413811722, 'learning_rate': 3.427840805211095, 'epoch': 0.4}
+ 40%|███▉ | 207/520 [13:05<19:14, 3.69s/it] 40%|████ | 208/520 [13:08<19:09, 3.68s/it] {'loss': 9.3516, 'grad_norm': 6.230685077437303e-05, 'learning_rate': 3.413352560915988, 'epoch': 0.4}
+ 40%|████ | 208/520 [13:09<19:09, 3.68s/it] 40%|████ | 209/520 [13:12<19:02, 3.67s/it] {'loss': 9.5814, 'grad_norm': 7.4652142882567e-05, 'learning_rate': 3.398828829095419, 'epoch': 0.4}
+ 40%|████ | 209/520 [13:12<19:02, 3.67s/it] 40%|████ | 210/520 [13:16<18:57, 3.67s/it] {'loss': 9.425, 'grad_norm': 6.841314898865487e-05, 'learning_rate': 3.3842701740564536, 'epoch': 0.4}
+ 40%|████ | 210/520 [13:16<18:57, 3.67s/it] 41%|████ | 211/520 [13:19<18:52, 3.66s/it] {'loss': 9.4638, 'grad_norm': 6.0995792459141556e-05, 'learning_rate': 3.3696771614630676, 'epoch': 0.41}
+ 41%|████ | 211/520 [13:19<18:52, 3.66s/it] 41%|████ | 212/520 [13:23<18:46, 3.66s/it] {'loss': 8.9479, 'grad_norm': 7.449652905872272e-05, 'learning_rate': 3.3550503583141724, 'epoch': 0.41}
+ 41%|████ | 212/520 [13:23<18:46, 3.66s/it] 41%|████ | 213/520 [13:27<18:45, 3.66s/it] {'loss': 9.8755, 'grad_norm': 0.00011303063659424587, 'learning_rate': 3.340390332921577, 'epoch': 0.41}
+ 41%|████ | 213/520 [13:27<18:45, 3.66s/it] 41%|████ | 214/520 [13:30<18:41, 3.67s/it] {'loss': 9.5183, 'grad_norm': 0.00013437501439305118, 'learning_rate': 3.325697654887918, 'epoch': 0.41}
+ 41%|████ | 214/520 [13:30<18:41, 3.67s/it] 41%|████▏ | 215/520 [13:34<18:43, 3.68s/it] {'loss': 9.9484, 'grad_norm': 0.00026476015714667795, 'learning_rate': 3.310972895084518, 'epoch': 0.41}
+ 41%|████▏ | 215/520 [13:34<18:43, 3.68s/it] 42%|████▏ | 216/520 [13:38<18:38, 3.68s/it] {'loss': 9.2344, 'grad_norm': 0.00010816931146458961, 'learning_rate': 3.2962166256292114, 'epoch': 0.42}
+ 42%|████▏ | 216/520 [13:38<18:38, 3.68s/it] 42%|████▏ | 217/520 [13:42<18:36, 3.68s/it] {'loss': 9.3378, 'grad_norm': 6.515663299329366e-05, 'learning_rate': 3.2814294198641116, 'epoch': 0.42}
+ 42%|████▏ | 217/520 [13:42<18:36, 3.68s/it] 42%|████▏ | 218/520 [13:45<18:38, 3.70s/it] {'loss': 9.7452, 'grad_norm': 7.084119480826125e-05, 'learning_rate': 3.266611852333336, 'epoch': 0.42}
+ 42%|████▏ | 218/520 [13:45<18:38, 3.70s/it] 42%|████▏ | 219/520 [13:49<18:31, 3.69s/it] {'loss': 8.9025, 'grad_norm': 7.728328635634239e-05, 'learning_rate': 3.2517644987606826, 'epoch': 0.42}
+ 42%|████▏ | 219/520 [13:49<18:31, 3.69s/it] 42%|████▏ | 220/520 [13:53<18:24, 3.68s/it] {'loss': 10.2338, 'grad_norm': 7.758540833372115e-05, 'learning_rate': 3.2368879360272604, 'epoch': 0.42}
+ 42%|████▏ | 220/520 [13:53<18:24, 3.68s/it] 42%|████▎ | 221/520 [13:56<18:26, 3.70s/it] {'loss': 9.3007, 'grad_norm': 7.416245968217911e-05, 'learning_rate': 3.2219827421490748, 'epoch': 0.42}
+ 42%|████▎ | 221/520 [13:56<18:26, 3.70s/it] 43%|████▎ | 222/520 [14:00<18:21, 3.70s/it] {'loss': 8.9522, 'grad_norm': 8.00364390265154e-05, 'learning_rate': 3.2070494962545686, 'epoch': 0.43}
+ 43%|████▎ | 222/520 [14:00<18:21, 3.70s/it] 43%|████▎ | 223/520 [14:04<18:20, 3.70s/it] {'loss': 8.9987, 'grad_norm': 6.809922958031673e-05, 'learning_rate': 3.1920887785621233, 'epoch': 0.43}
+ 43%|████▎ | 223/520 [14:04<18:20, 3.70s/it] 43%|████▎ | 224/520 [14:08<18:26, 3.74s/it] {'loss': 11.6307, 'grad_norm': 9.769870430940974e-05, 'learning_rate': 3.177101170357513, 'epoch': 0.43}
+ 43%|████▎ | 224/520 [14:08<18:26, 3.74s/it] 43%|████▎ | 225/520 [14:11<18:18, 3.72s/it] {'loss': 9.1996, 'grad_norm': 5.715666099712286e-05, 'learning_rate': 3.162087253971318, 'epoch': 0.43}
+ 43%|████▎ | 225/520 [14:11<18:18, 3.72s/it] 43%|████▎ | 226/520 [14:15<18:22, 3.75s/it] {'loss': 9.5755, 'grad_norm': 6.410242623373966e-05, 'learning_rate': 3.147047612756302, 'epoch': 0.43}
+ 43%|████▎ | 226/520 [14:15<18:22, 3.75s/it] 44%|████▎ | 227/520 [14:19<18:14, 3.74s/it] {'loss': 9.3114, 'grad_norm': 6.828533657411537e-05, 'learning_rate': 3.1319828310647435, 'epoch': 0.44}
+ 44%|████▎ | 227/520 [14:19<18:14, 3.74s/it] 44%|████▍ | 228/520 [14:23<18:09, 3.73s/it] {'loss': 10.8096, 'grad_norm': 6.760727630080646e-05, 'learning_rate': 3.116893494225734, 'epoch': 0.44}
+ 44%|████▍ | 228/520 [14:23<18:09, 3.73s/it] 44%|████▍ | 229/520 [14:26<18:03, 3.72s/it] {'loss': 9.3119, 'grad_norm': 6.285898111682651e-05, 'learning_rate': 3.101780188522433, 'epoch': 0.44}
+ 44%|████▍ | 229/520 [14:26<18:03, 3.72s/it] 44%|████▍ | 230/520 [14:30<18:00, 3.73s/it] {'loss': 9.3522, 'grad_norm': 9.234044687690477e-05, 'learning_rate': 3.0866435011692883, 'epoch': 0.44}
+ 44%|████▍ | 230/520 [14:30<18:00, 3.73s/it] 44%|████▍ | 231/520 [14:34<17:53, 3.71s/it] {'loss': 9.4985, 'grad_norm': 6.870624159409205e-05, 'learning_rate': 3.071484020289224, 'epoch': 0.44}
+ 44%|████▍ | 231/520 [14:34<17:53, 3.71s/it] 45%|████▍ | 232/520 [14:37<17:45, 3.70s/it] {'loss': 11.0144, 'grad_norm': 0.00010547831713362562, 'learning_rate': 3.056302334890786, 'epoch': 0.45}
+ 45%|████▍ | 232/520 [14:37<17:45, 3.70s/it] 45%|████▍ | 233/520 [14:41<17:37, 3.69s/it] {'loss': 10.371, 'grad_norm': 0.0001258188731731873, 'learning_rate': 3.0410990348452573, 'epoch': 0.45}
+ 45%|████▍ | 233/520 [14:41<17:37, 3.69s/it] 45%|████▌ | 234/520 [14:45<17:32, 3.68s/it] {'loss': 8.983, 'grad_norm': 0.0001571774867665608, 'learning_rate': 3.0258747108637394, 'epoch': 0.45}
+ 45%|████▌ | 234/520 [14:45<17:32, 3.68s/it] 45%|████▌ | 235/520 [14:48<17:29, 3.68s/it] {'loss': 9.3575, 'grad_norm': 6.212691930313311e-05, 'learning_rate': 3.010629954474201, 'epoch': 0.45}
+ 45%|████▌ | 235/520 [14:48<17:29, 3.68s/it] 45%|████▌ | 236/520 [14:52<17:26, 3.69s/it] {'loss': 10.0829, 'grad_norm': 6.43573639712276e-05, 'learning_rate': 2.995365357998494, 'epoch': 0.45}
+ 45%|████▌ | 236/520 [14:52<17:26, 3.69s/it] 46%|████▌ | 237/520 [14:56<17:23, 3.69s/it] {'loss': 9.5763, 'grad_norm': 5.9257770257786304e-05, 'learning_rate': 2.9800815145293407, 'epoch': 0.46}
+ 46%|████▌ | 237/520 [14:56<17:23, 3.69s/it] 46%|████▌ | 238/520 [14:59<17:17, 3.68s/it] {'loss': 9.2037, 'grad_norm': 7.679633452135856e-05, 'learning_rate': 2.964779017907287, 'epoch': 0.46}
+ 46%|████▌ | 238/520 [14:59<17:17, 3.68s/it] 46%|████▌ | 239/520 [15:03<17:14, 3.68s/it] {'loss': 9.9984, 'grad_norm': 6.717430635731242e-05, 'learning_rate': 2.9494584626976317, 'epoch': 0.46}
+ 46%|████▌ | 239/520 [15:03<17:14, 3.68s/it] 46%|████▌ | 240/520 [15:07<17:11, 3.68s/it] {'loss': 8.7653, 'grad_norm': 0.00010383778055722828, 'learning_rate': 2.934120444167326, 'epoch': 0.46}
+ 46%|████▌ | 240/520 [15:07<17:11, 3.68s/it] 46%|████▋ | 241/520 [15:10<17:10, 3.69s/it] {'loss': 9.1351, 'grad_norm': 9.673573077669152e-05, 'learning_rate': 2.918765558261841, 'epoch': 0.46}
+ 46%|████▋ | 241/520 [15:10<17:10, 3.69s/it] 47%|████▋ | 242/520 [15:14<17:05, 3.69s/it] {'loss': 9.3638, 'grad_norm': 5.583032854243762e-05, 'learning_rate': 2.903394401582017, 'epoch': 0.47}
+ 47%|████▋ | 242/520 [15:14<17:05, 3.69s/it] 47%|████▋ | 243/520 [15:18<17:03, 3.70s/it] {'loss': 8.9792, 'grad_norm': 6.392174180123157e-05, 'learning_rate': 2.8880075713608786, 'epoch': 0.47}
+ 47%|████▋ | 243/520 [15:18<17:03, 3.70s/it] 47%|████▋ | 244/520 [15:22<16:59, 3.69s/it] {'loss': 9.6741, 'grad_norm': 5.140899725787894e-05, 'learning_rate': 2.8726056654404357, 'epoch': 0.47}
+ 47%|████▋ | 244/520 [15:22<16:59, 3.69s/it] 47%|████▋ | 245/520 [15:25<16:58, 3.70s/it] {'loss': 9.0103, 'grad_norm': 7.483970952535212e-05, 'learning_rate': 2.8571892822484504, 'epoch': 0.47}
+ 47%|████▋ | 245/520 [15:25<16:58, 3.70s/it] 47%|████▋ | 246/520 [15:29<16:56, 3.71s/it] {'loss': 10.6734, 'grad_norm': 5.5247987528368854e-05, 'learning_rate': 2.8417590207751835, 'epoch': 0.47}
+ 47%|████▋ | 246/520 [15:29<16:56, 3.71s/it] 48%|████▊ | 247/520 [15:33<16:51, 3.70s/it] {'loss': 10.0756, 'grad_norm': 8.931068401358069e-05, 'learning_rate': 2.8263154805501296, 'epoch': 0.47}
+ 48%|████▊ | 247/520 [15:33<16:51, 3.70s/it] 48%|████▊ | 248/520 [15:36<16:51, 3.72s/it] {'loss': 9.0795, 'grad_norm': 8.955397914372973e-05, 'learning_rate': 2.810859261618713, 'epoch': 0.48}
+ 48%|████▊ | 248/520 [15:36<16:51, 3.72s/it] 48%|████▊ | 249/520 [15:40<16:47, 3.72s/it] {'loss': 9.7026, 'grad_norm': 5.6493037991103837e-05, 'learning_rate': 2.7953909645189823, 'epoch': 0.48}
+ 48%|████▊ | 249/520 [15:40<16:47, 3.72s/it] 48%|████▊ | 250/520 [15:44<16:43, 3.72s/it] {'loss': 9.5788, 'grad_norm': 8.873935334108159e-05, 'learning_rate': 2.77991119025827, 'epoch': 0.48}
+ 48%|████▊ | 250/520 [15:44<16:43, 3.72s/it] 48%|████▊ | 251/520 [15:48<16:37, 3.71s/it] {'loss': 9.8678, 'grad_norm': 6.425743905570525e-05, 'learning_rate': 2.7644205402898447, 'epoch': 0.48}
+ 48%|████▊ | 251/520 [15:48<16:37, 3.71s/it] 48%|████▊ | 252/520 [15:51<16:33, 3.71s/it] {'loss': 10.292, 'grad_norm': 6.792843741097162e-05, 'learning_rate': 2.748919616489542, 'epoch': 0.48}
+ 48%|████▊ | 252/520 [15:51<16:33, 3.71s/it] 49%|████▊ | 253/520 [15:55<16:31, 3.72s/it] {'loss': 9.9021, 'grad_norm': 7.25672199313685e-05, 'learning_rate': 2.7334090211323763, 'epoch': 0.49}
+ 49%|████▊ | 253/520 [15:55<16:31, 3.72s/it] 49%|████▉ | 254/520 [15:59<16:24, 3.70s/it] {'loss': 9.2073, 'grad_norm': 6.488985263095034e-05, 'learning_rate': 2.717889356869146, 'epoch': 0.49}
+ 49%|████▉ | 254/520 [15:59<16:24, 3.70s/it] 49%|████▉ | 255/520 [16:02<16:18, 3.69s/it] {'loss': 9.4826, 'grad_norm': 6.30360413667597e-05, 'learning_rate': 2.702361226703008, 'epoch': 0.49}
+ 49%|████▉ | 255/520 [16:02<16:18, 3.69s/it] 49%|████▉ | 256/520 [16:06<16:15, 3.70s/it] {'loss': 9.5104, 'grad_norm': 5.3241463108815105e-05, 'learning_rate': 2.686825233966061, 'epoch': 0.49}
+ 49%|████▉ | 256/520 [16:06<16:15, 3.70s/it] 49%|████▉ | 257/520 [16:10<16:07, 3.68s/it] {'loss': 9.4788, 'grad_norm': 5.698610063397631e-05, 'learning_rate': 2.6712819822958918, 'epoch': 0.49}
+ 49%|████▉ | 257/520 [16:10<16:07, 3.68s/it] 50%|████▉ | 258/520 [16:13<16:03, 3.68s/it] {'loss': 9.4668, 'grad_norm': 5.771511877498832e-05, 'learning_rate': 2.6557320756121303, 'epoch': 0.5}
+ 50%|████▉ | 258/520 [16:13<16:03, 3.68s/it] 50%|████▉ | 259/520 [16:17<15:59, 3.68s/it] {'loss': 9.9285, 'grad_norm': 6.178017502792441e-05, 'learning_rate': 2.6401761180929793, 'epoch': 0.5}
+ 50%|████▉ | 259/520 [16:17<15:59, 3.68s/it] 50%|█████ | 260/520 [16:21<15:54, 3.67s/it] {'loss': 10.3313, 'grad_norm': 6.961083106885964e-05, 'learning_rate': 2.624614714151743, 'epoch': 0.5}
+ 50%|█████ | 260/520 [16:21<15:54, 3.67s/it] 50%|█████ | 261/520 [16:24<15:53, 3.68s/it] {'loss': 10.4584, 'grad_norm': 6.045627954553553e-05, 'learning_rate': 2.60904846841334, 'epoch': 0.5}
+ 50%|█████ | 261/520 [16:24<15:53, 3.68s/it] 50%|█████ | 262/520 [16:28<15:48, 3.68s/it] {'loss': 9.3313, 'grad_norm': 6.819477205873844e-05, 'learning_rate': 2.593477985690815, 'epoch': 0.5}
+ 50%|█████ | 262/520 [16:28<15:48, 3.68s/it] 51%|█████ | 263/520 [16:32<15:43, 3.67s/it] {'loss': 10.5342, 'grad_norm': 8.181263096489187e-05, 'learning_rate': 2.577903870961833, 'epoch': 0.51}
+ 51%|█████ | 263/520 [16:32<15:43, 3.67s/it] 51%|█████ | 264/520 [16:35<15:40, 3.67s/it] {'loss': 9.6992, 'grad_norm': 4.513422713550503e-05, 'learning_rate': 2.562326729345182, 'epoch': 0.51}
+ 51%|█████ | 264/520 [16:35<15:40, 3.67s/it] 51%|█████ | 265/520 [16:39<15:40, 3.69s/it] {'loss': 9.5753, 'grad_norm': 8.217927920446584e-05, 'learning_rate': 2.546747166077256, 'epoch': 0.51}
+ 51%|█████ | 265/520 [16:39<15:40, 3.69s/it] 51%|█████ | 266/520 [16:43<15:35, 3.68s/it] {'loss': 8.4818, 'grad_norm': 8.79742947555334e-05, 'learning_rate': 2.531165786488538, 'epoch': 0.51}
+ 51%|█████ | 266/520 [16:43<15:35, 3.68s/it] 51%|█████▏ | 267/520 [16:46<15:32, 3.69s/it] {'loss': 9.2711, 'grad_norm': 6.449242680839145e-05, 'learning_rate': 2.515583195980084, 'epoch': 0.51}
+ 51%|█████▏ | 267/520 [16:46<15:32, 3.69s/it] 52%|█████▏ | 268/520 [16:50<15:29, 3.69s/it] {'loss': 10.9301, 'grad_norm': 7.845046744115227e-05, 'learning_rate': 2.5, 'epoch': 0.52}
+ 52%|█████▏ | 268/520 [16:50<15:29, 3.69s/it] 52%|█████▏ | 269/520 [16:54<15:24, 3.68s/it] {'loss': 9.6951, 'grad_norm': 6.426370149005804e-05, 'learning_rate': 2.484416804019916, 'epoch': 0.52}
+ 52%|█████▏ | 269/520 [16:54<15:24, 3.68s/it] 52%|█████▏ | 270/520 [16:58<15:25, 3.70s/it] {'loss': 9.8742, 'grad_norm': 0.00010433663294259543, 'learning_rate': 2.4688342135114625, 'epoch': 0.52}
+ 52%|█████▏ | 270/520 [16:58<15:25, 3.70s/it] 52%|█████▏ | 271/520 [17:01<15:21, 3.70s/it] {'loss': 10.1093, 'grad_norm': 5.954495945725371e-05, 'learning_rate': 2.453252833922745, 'epoch': 0.52}
+ 52%|█████▏ | 271/520 [17:01<15:21, 3.70s/it] 52%|█████▏ | 272/520 [17:05<15:19, 3.71s/it] {'loss': 10.5583, 'grad_norm': 7.367928022391688e-05, 'learning_rate': 2.4376732706548183, 'epoch': 0.52}
+ 52%|█████▏ | 272/520 [17:05<15:19, 3.71s/it] 52%|█████▎ | 273/520 [17:09<15:27, 3.75s/it] {'loss': 10.6519, 'grad_norm': 9.396695233258654e-05, 'learning_rate': 2.4220961290381675, 'epoch': 0.53}
+ 52%|█████▎ | 273/520 [17:09<15:27, 3.75s/it] 53%|█████▎ | 274/520
[17:13<15:31, 3.78s/it] {'loss': 9.2142, 'grad_norm': 7.749128026871329e-05, 'learning_rate': 2.406522014309186, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:13<15:31, 3.78s/it] 53%|█████▎ | 275/520 [17:17<15:41, 3.84s/it] {'loss': 9.4492, 'grad_norm': 7.446242334013844e-05, 'learning_rate': 2.3909515315866603, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:17<15:41, 3.84s/it] 53%|█████▎ | 276/520 [17:21<15:37, 3.84s/it] {'loss': 9.9324, 'grad_norm': 5.5372254604301005e-05, 'learning_rate': 2.3753852858482567, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:21<15:37, 3.84s/it] 53%|█████▎ | 277/520 [17:24<15:34, 3.85s/it] {'loss': 10.6307, 'grad_norm': 7.078499899977919e-05, 'learning_rate': 2.3598238819070203, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:24<15:34, 3.85s/it] 53%|█████▎ | 278/520 [17:28<15:33, 3.86s/it] {'loss': 8.6373, 'grad_norm': 7.781187852155854e-05, 'learning_rate': 2.3442679243878697, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:28<15:33, 3.86s/it] 54%|█████▎ | 279/520 [17:32<15:28, 3.85s/it] {'loss': 9.9884, 'grad_norm': 8.35759786305292e-05, 'learning_rate': 2.3287180177041082, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:32<15:28, 3.85s/it] 54%|█████▍ | 280/520 [17:36<15:25, 3.85s/it] {'loss': 9.2494, 'grad_norm': 4.624015704027248e-05, 'learning_rate': 2.3131747660339395, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:36<15:25, 3.85s/it] 54%|█████▍ | 281/520 [17:40<15:23, 3.86s/it] {'loss': 9.6825, 'grad_norm': 4.917272628698121e-05, 'learning_rate': 2.297638773296992, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:40<15:23, 3.86s/it] 54%|█████▍ | 282/520 [17:44<15:16, 3.85s/it] {'loss': 8.7733, 'grad_norm': 6.110030677554181e-05, 'learning_rate': 2.282110643130854, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:44<15:16, 3.85s/it] 54%|█████▍ | 283/520 [17:48<15:12, 3.85s/it] {'loss': 9.8456, 'grad_norm': 5.510612806770364e-05, 'learning_rate': 2.2665909788676237, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:48<15:12, 3.85s/it] 55%|█████▍ | 284/520 [17:51<15:09, 3.85s/it] {'loss': 10.2942, 'grad_norm': 8.567446273142675e-05, 'learning_rate': 2.251080383510459, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:51<15:09, 3.85s/it] 55%|█████▍ | 285/520 [17:55<15:06, 3.86s/it] {'loss': 9.2321, 'grad_norm': 9.598753046483139e-05, 'learning_rate': 2.2355794597101557, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:55<15:06, 3.86s/it] 55%|█████▌ | 286/520 [17:59<15:09, 3.89s/it] {'loss': 8.9944, 'grad_norm': 8.228746968584419e-05, 'learning_rate': 2.22008880974173, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:59<15:09, 3.89s/it] 55%|█████▌ | 287/520 [18:03<15:05, 3.89s/it] {'loss': 9.4306, 'grad_norm': 8.184802567194987e-05, 'learning_rate': 2.204609035481018, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:03<15:05, 3.89s/it] 55%|█████▌ | 288/520 [18:07<15:00, 3.88s/it] {'loss': 10.1908, 'grad_norm': 0.0001450541844662586, 'learning_rate': 2.1891407383812878, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:07<15:00, 3.88s/it] 56%|█████▌ | 289/520 [18:11<15:00, 3.90s/it] {'loss': 9.323, 'grad_norm': 0.00011245077786736343, 'learning_rate': 2.1736845194498717, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:11<15:00, 3.90s/it] 56%|█████▌ | 290/520 [18:15<15:00, 3.92s/it] {'loss': 8.9357, 'grad_norm': 9.836257006168727e-05, 'learning_rate': 2.158240979224817, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:15<15:00, 3.92s/it] 56%|█████▌ | 291/520 [18:19<14:54, 3.91s/it] {'loss': 9.0412, 'grad_norm': 5.2033195647051783e-05, 'learning_rate': 2.1428107177515505, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:19<14:54, 3.91s/it] 56%|█████▌ | 292/520 
[18:23<14:50, 3.91s/it] {'loss': 9.6604, 'grad_norm': 4.3493186255211644e-05, 'learning_rate': 2.127394334559564, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:23<14:50, 3.91s/it] 56%|█████▋ | 293/520 [18:26<14:35, 3.86s/it] {'loss': 9.228, 'grad_norm': 4.542541313196677e-05, 'learning_rate': 2.111992428639121, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:26<14:35, 3.86s/it] 57%|█████▋ | 294/520 [18:30<14:18, 3.80s/it] {'loss': 9.6383, 'grad_norm': 4.2180266999278345e-05, 'learning_rate': 2.096605598417983, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:30<14:18, 3.80s/it] 57%|█████▋ | 295/520 [18:34<14:08, 3.77s/it] {'loss': 10.5291, 'grad_norm': 5.752527911548314e-05, 'learning_rate': 2.081234441738159, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:34<14:08, 3.77s/it] 57%|█████▋ | 296/520 [18:37<13:56, 3.73s/it] {'loss': 8.7761, 'grad_norm': 4.899323715130659e-05, 'learning_rate': 2.0658795558326744, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:37<13:56, 3.73s/it] 57%|█████▋ | 297/520 [18:41<13:49, 3.72s/it] {'loss': 9.7334, 'grad_norm': 3.529687215222191e-05, 'learning_rate': 2.0505415373023683, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:41<13:49, 3.72s/it] 57%|█████▋ | 298/520 [18:45<13:44, 3.71s/it] {'loss': 9.3941, 'grad_norm': 4.63781802036806e-05, 'learning_rate': 2.0352209820927136, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:45<13:44, 3.71s/it] 57%|█████▊ | 299/520 [18:48<13:38, 3.70s/it] {'loss': 10.5406, 'grad_norm': 6.616344412964647e-05, 'learning_rate': 2.0199184854706598, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:48<13:38, 3.70s/it] 58%|█████▊ | 300/520 [18:52<13:31, 3.69s/it] {'loss': 9.7592, 'grad_norm': 4.520632973540646e-05, 'learning_rate': 2.0046346420015064, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:52<13:31, 3.69s/it] 58%|█████▊ | 301/520 [18:56<13:27, 3.69s/it] {'loss': 9.4938, 'grad_norm': 6.114577200829887e-05, 'learning_rate': 1.9893700455257997, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:56<13:27, 3.69s/it] 58%|█████▊ | 302/520 [18:59<13:22, 3.68s/it] {'loss': 10.5031, 'grad_norm': 7.65563918858685e-05, 'learning_rate': 1.974125289136261, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:59<13:22, 3.68s/it] 58%|█████▊ | 303/520 [19:03<13:16, 3.67s/it] {'loss': 9.161, 'grad_norm': 6.270865782042505e-05, 'learning_rate': 1.9589009651547429, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:03<13:16, 3.67s/it] 58%|█████▊ | 304/520 [19:07<13:17, 3.69s/it] {'loss': 10.3693, 'grad_norm': 7.616171447140048e-05, 'learning_rate': 1.9436976651092142, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:07<13:17, 3.69s/it] 59%|█████▊ | 305/520 [19:11<13:11, 3.68s/it] {'loss': 10.0304, 'grad_norm': 5.5211176621375156e-05, 'learning_rate': 1.9285159797107765, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:11<13:11, 3.68s/it] 59%|█████▉ | 306/520 [19:14<13:08, 3.69s/it] {'loss': 9.7945, 'grad_norm': 5.330923045023893e-05, 'learning_rate': 1.9133564988307126, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:14<13:08, 3.69s/it] 59%|█████▉ | 307/520 [19:18<13:02, 3.67s/it] {'loss': 9.3805, 'grad_norm': 4.577437116620746e-05, 'learning_rate': 1.8982198114775681, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:18<13:02, 3.67s/it] 59%|█████▉ | 308/520 [19:22<12:57, 3.67s/it] {'loss': 9.6295, 'grad_norm': 5.2019623847201746e-05, 'learning_rate': 1.8831065057742657, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:22<12:57, 3.67s/it] 59%|█████▉ | 309/520 [19:26<13:17, 3.78s/it] {'loss': 9.197, 'grad_norm': 4.43795278496313e-05, 'learning_rate': 1.868017168935256, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:26<13:17, 3.78s/it] 60%|█████▉ | 310/520 
[19:29<13:05, 3.74s/it] {'loss': 9.2865, 'grad_norm': 4.058054168410818e-05, 'learning_rate': 1.852952387243698, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:29<13:05, 3.74s/it] 60%|█████▉ | 311/520 [19:33<12:59, 3.73s/it] {'loss': 9.4458, 'grad_norm': 3.1255803649697115e-05, 'learning_rate': 1.8379127460286817, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:33<12:59, 3.73s/it] 60%|██████ | 312/520 [19:37<12:53, 3.72s/it] {'loss': 9.1459, 'grad_norm': 4.137397930059491e-05, 'learning_rate': 1.8228988296424875, 'epoch': 0.6} + 60%|██████ | 312/520 [19:37<12:53, 3.72s/it] 60%|██████ | 313/520 [19:40<12:44, 3.69s/it] {'loss': 8.4981, 'grad_norm': 5.860993225113641e-05, 'learning_rate': 1.8079112214378767, 'epoch': 0.6} + 60%|██████ | 313/520 [19:40<12:44, 3.69s/it] 60%|██████ | 314/520 [19:44<13:03, 3.80s/it] {'loss': 9.3203, 'grad_norm': 3.455075254336656e-05, 'learning_rate': 1.7929505037454314, 'epoch': 0.6} + 60%|██████ | 314/520 [19:44<13:03, 3.80s/it] 61%|██████ | 315/520 [19:48<12:54, 3.78s/it] {'loss': 10.5825, 'grad_norm': 4.65471332501318e-05, 'learning_rate': 1.7780172578509257, 'epoch': 0.61} + 61%|██████ | 315/520 [19:48<12:54, 3.78s/it] 61%|██████ | 316/520 [19:52<13:07, 3.86s/it] {'loss': 9.1644, 'grad_norm': 4.0500104188188474e-05, 'learning_rate': 1.7631120639727393, 'epoch': 0.61} + 61%|██████ | 316/520 [19:52<13:07, 3.86s/it] 61%|██████ | 317/520 [19:56<12:55, 3.82s/it] {'loss': 8.5989, 'grad_norm': 5.653764770691824e-05, 'learning_rate': 1.7482355012393174, 'epoch': 0.61} + 61%|██████ | 317/520 [19:56<12:55, 3.82s/it] 61%|██████ | 318/520 [20:00<12:45, 3.79s/it] {'loss': 10.0601, 'grad_norm': 5.6414033023144514e-05, 'learning_rate': 1.7333881476666646, 'epoch': 0.61} + 61%|██████ | 318/520 [20:00<12:45, 3.79s/it] 61%|██████▏ | 319/520 [20:04<12:56, 3.86s/it] {'loss': 9.0131, 'grad_norm': 5.0391534102605024e-05, 'learning_rate': 1.7185705801358893, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:04<12:56, 3.86s/it] 62%|██████▏ | 320/520 [20:07<12:42, 3.81s/it] {'loss': 9.34, 'grad_norm': 4.647557062602322e-05, 'learning_rate': 1.703783374370789, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:07<12:42, 3.81s/it] 62%|██████▏ | 321/520 [20:11<12:34, 3.79s/it] {'loss': 9.5203, 'grad_norm': 5.206512514416211e-05, 'learning_rate': 1.6890271049154826, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:11<12:34, 3.79s/it] 62%|██████▏ | 322/520 [20:15<12:25, 3.76s/it] {'loss': 10.3391, 'grad_norm': 7.616943876336505e-05, 'learning_rate': 1.674302345112083, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:15<12:25, 3.76s/it] 62%|██████▏ | 323/520 [20:18<12:17, 3.74s/it] {'loss': 10.3749, 'grad_norm': 3.35051945047358e-05, 'learning_rate': 1.6596096670784235, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:18<12:17, 3.74s/it] 62%|██████▏ | 324/520 [20:22<12:12, 3.74s/it] {'loss': 9.268, 'grad_norm': 5.388428374987272e-05, 'learning_rate': 1.6449496416858285, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:22<12:12, 3.74s/it] 62%|██████▎ | 325/520 [20:26<12:08, 3.74s/it] {'loss': 9.5892, 'grad_norm': 4.444504097827032e-05, 'learning_rate': 1.6303228385369317, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:26<12:08, 3.74s/it] 63%|██████▎ | 326/520 [20:30<12:03, 3.73s/it] {'loss': 9.6272, 'grad_norm': 5.361148439270319e-05, 'learning_rate': 1.6157298259435464, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:30<12:03, 3.73s/it] 63%|██████▎ | 327/520 [20:33<11:56, 3.71s/it] {'loss': 10.4215, 'grad_norm': 5.658710641428929e-05, 'learning_rate': 1.601171170904581, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:33<11:56, 3.71s/it] 63%|██████▎ | 
328/520 [20:37<11:51, 3.71s/it] {'loss': 9.832, 'grad_norm': 7.24540995767955e-05, 'learning_rate': 1.5866474390840124, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:37<11:51, 3.71s/it] 63%|██████▎ | 329/520 [20:41<11:50, 3.72s/it] {'loss': 8.8315, 'grad_norm': 6.213164377217446e-05, 'learning_rate': 1.572159194788905, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:41<11:50, 3.72s/it] 63%|██████▎ | 330/520 [20:45<11:55, 3.77s/it] {'loss': 9.5203, 'grad_norm': 5.110216491266319e-05, 'learning_rate': 1.557707000947487, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:45<11:55, 3.77s/it] 64%|██████▎ | 331/520 [20:48<11:55, 3.79s/it] {'loss': 9.437, 'grad_norm': 4.19767078711104e-05, 'learning_rate': 1.5432914190872757, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:48<11:55, 3.79s/it] 64%|██████▍ | 332/520 [20:52<11:54, 3.80s/it] {'loss': 10.4226, 'grad_norm': 4.832686274021645e-05, 'learning_rate': 1.5289130093132632, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:52<11:54, 3.80s/it] 64%|██████▍ | 333/520 [20:56<11:52, 3.81s/it] {'loss': 10.0688, 'grad_norm': 5.916835717892368e-05, 'learning_rate': 1.514572330286152, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:56<11:52, 3.81s/it] 64%|██████▍ | 334/520 [21:00<11:49, 3.81s/it] {'loss': 9.3052, 'grad_norm': 6.0181609644101374e-05, 'learning_rate': 1.500269939200648, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:00<11:49, 3.81s/it] 64%|██████▍ | 335/520 [21:04<11:47, 3.82s/it] {'loss': 9.4019, 'grad_norm': 6.12309021976964e-05, 'learning_rate': 1.4860063917638127, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:04<11:47, 3.82s/it] 65%|██████▍ | 336/520 [21:08<11:45, 3.83s/it] {'loss': 9.2099, 'grad_norm': 4.546070229068187e-05, 'learning_rate': 1.4717822421734716, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:08<11:45, 3.83s/it] 65%|██████▍ | 337/520 [21:11<11:41, 3.83s/it] {'loss': 9.3963, 'grad_norm': 3.77348767747985e-05, 'learning_rate': 1.4575980430966806, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:11<11:41, 3.83s/it] 65%|██████▌ | 338/520 [21:15<11:36, 3.83s/it] {'loss': 9.3852, 'grad_norm': 3.2430363902565456e-05, 'learning_rate': 1.4434543456482518, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:15<11:36, 3.83s/it] 65%|██████▌ | 339/520 [21:19<11:26, 3.79s/it] {'loss': 9.6937, 'grad_norm': 4.0876880819903316e-05, 'learning_rate': 1.429351699369343, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:19<11:26, 3.79s/it] 65%|██████▌ | 340/520 [21:23<11:20, 3.78s/it] {'loss': 9.171, 'grad_norm': 2.6914132970057088e-05, 'learning_rate': 1.4152906522061048, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:23<11:20, 3.78s/it] 66%|██████▌ | 341/520 [21:26<11:15, 3.78s/it] {'loss': 9.4376, 'grad_norm': 3.537605995496903e-05, 'learning_rate': 1.4012717504883874, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:26<11:15, 3.78s/it] 66%|██████▌ | 342/520 [21:30<11:06, 3.74s/it] {'loss': 10.6528, 'grad_norm': 3.833466211213659e-05, 'learning_rate': 1.387295538908519, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:30<11:06, 3.74s/it] 66%|██████▌ | 343/520 [21:34<10:59, 3.73s/it] {'loss': 10.2093, 'grad_norm': 2.7220176791636313e-05, 'learning_rate': 1.3733625605001365, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:34<10:59, 3.73s/it] 66%|██████▌ | 344/520 [21:37<10:53, 3.71s/it] {'loss': 9.1192, 'grad_norm': 5.631535989713341e-05, 'learning_rate': 1.3594733566170925, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:37<10:53, 3.71s/it] 66%|██████▋ | 345/520 [21:41<10:47, 3.70s/it] {'loss': 9.5244, 'grad_norm': 2.8389160480968374e-05, 'learning_rate': 1.3456284669124159, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:41<10:47, 
3.70s/it] 67%|██████▋ | 346/520 [21:45<10:44, 3.70s/it] {'loss': 10.4136, 'grad_norm': 4.653651533299006e-05, 'learning_rate': 1.331828429317345, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:45<10:44, 3.70s/it] 67%|██████▋ | 347/520 [21:49<10:48, 3.75s/it] {'loss': 8.6545, 'grad_norm': 4.448921452357023e-05, 'learning_rate': 1.3180737800204327, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:49<10:48, 3.75s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:52<10:43, 3.74s/it] {'loss': 9.745, 'grad_norm': 4.7382327715782816e-05, 'learning_rate': 1.3043650534467053, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:52<10:43, 3.74s/it] 67%|██████▋ | 349/520 [21:56<10:36, 3.72s/it] {'loss': 9.9378, 'grad_norm': 4.634183880174814e-05, 'learning_rate': 1.2907027822369006, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:56<10:36, 3.72s/it] 67%|██████▋ | 350/520 [22:00<10:29, 3.70s/it] {'loss': 9.4791, 'grad_norm': 2.739145678308495e-05, 'learning_rate': 1.2770874972267776, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:00<10:29, 3.70s/it] 68%|██████▊ | 351/520 [22:03<10:25, 3.70s/it] {'loss': 9.0949, 'grad_norm': 3.302262936297271e-05, 'learning_rate': 1.2635197274264813, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:03<10:25, 3.70s/it] 68%|██████▊ | 352/520 [22:07<10:20, 3.69s/it] {'loss': 9.487, 'grad_norm': 2.870864519568776e-05, 'learning_rate': 1.2500000000000004, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:07<10:20, 3.69s/it] 68%|██████▊ | 353/520 [22:11<10:17, 3.70s/it] {'loss': 9.9857, 'grad_norm': 3.997224523683678e-05, 'learning_rate': 1.236528840244674, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:11<10:17, 3.70s/it] 68%|██████▊ | 354/520 [22:15<10:13, 3.70s/it] {'loss': 10.4872, 'grad_norm': 2.8342899218281983e-05, 'learning_rate': 1.2231067715707866, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:15<10:13, 3.70s/it] 68%|██████▊ | 355/520 [22:18<10:08, 3.69s/it] {'loss': 9.1796, 'grad_norm': 5.060448508223327e-05, 'learning_rate': 1.2097343154812332, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:18<10:08, 3.69s/it] 68%|██████▊ | 356/520 [22:22<10:05, 3.69s/it] {'loss': 9.4309, 'grad_norm': 3.476635238173078e-05, 'learning_rate': 1.196411991551255, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:22<10:05, 3.69s/it] 69%|██████▊ | 357/520 [22:26<09:59, 3.68s/it] {'loss': 8.7883, 'grad_norm': 4.365888934894731e-05, 'learning_rate': 1.183140317408248, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:26<09:59, 3.68s/it] 69%|██████▉ | 358/520 [22:29<09:59, 3.70s/it] {'loss': 9.1836, 'grad_norm': 4.206840824843568e-05, 'learning_rate': 1.169919808711659, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:29<09:59, 3.70s/it] 69%|██████▉ | 359/520 [22:33<09:57, 3.71s/it] {'loss': 10.2649, 'grad_norm': 6.174394732216952e-05, 'learning_rate': 1.15675097913294, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:33<09:57, 3.71s/it] 69%|██████▉ | 360/520 [22:37<09:51, 3.70s/it] {'loss': 10.636, 'grad_norm': 4.581590235807907e-05, 'learning_rate': 1.1436343403356017, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:37<09:51, 3.70s/it] 69%|██████▉ | 361/520 [22:40<09:46, 3.69s/it] {'loss': 10.3153, 'grad_norm': 4.784567284921903e-05, 'learning_rate': 1.130570401955322, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:40<09:46, 3.69s/it] 70%|██████▉ | 362/520 [22:44<09:41, 3.68s/it] {'loss': 9.1819, 'grad_norm': 5.9057472865811795e-05, 'learning_rate': 1.1175596715801515, 'epoch': 0.7} + 70%|██████▉ | 362/520 
[22:44<09:41, 3.68s/it] 70%|██████▉ | 363/520 [22:48<09:36, 3.67s/it] {'loss': 9.6121, 'grad_norm': 3.624455810415816e-05, 'learning_rate': 1.1046026547307906, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:48<09:36, 3.67s/it] 70%|███████ | 364/520 [22:51<09:33, 3.68s/it] {'loss': 10.5465, 'grad_norm': 6.256483636140871e-05, 'learning_rate': 1.091699854840945, 'epoch': 0.7} + 70%|███████ | 364/520 [22:51<09:33, 3.68s/it] 70%|███████ | 365/520 [22:55<09:29, 3.68s/it] {'loss': 9.7663, 'grad_norm': 3.066979488319745e-05, 'learning_rate': 1.0788517732377696, 'epoch': 0.7} + 70%|███████ | 365/520 [22:55<09:29, 3.68s/it] 70%|███████ | 366/520 [22:59<09:27, 3.68s/it] {'loss': 9.5745, 'grad_norm': 3.7146531770603164e-05, 'learning_rate': 1.0660589091223853, 'epoch': 0.7} + 70%|███████ | 366/520 [22:59<09:27, 3.68s/it] 71%|███████ | 367/520 [23:02<09:24, 3.69s/it] {'loss': 9.7975, 'grad_norm': 5.071163510058444e-05, 'learning_rate': 1.0533217595504856, 'epoch': 0.71} + 71%|███████ | 367/520 [23:02<09:24, 3.69s/it] 71%|███████ | 368/520 [23:06<09:21, 3.69s/it] {'loss': 9.0974, 'grad_norm': 3.551898273301838e-05, 'learning_rate': 1.0406408194130259, 'epoch': 0.71} + 71%|███████ | 368/520 [23:06<09:21, 3.69s/it] 71%|███████ | 369/520 [23:10<09:17, 3.69s/it] {'loss': 9.8171, 'grad_norm': 4.493771366513493e-05, 'learning_rate': 1.0280165814169884, 'epoch': 0.71} + 71%|███████ | 369/520 [23:10<09:17, 3.69s/it] 71%|███████ | 370/520 [23:14<09:14, 3.70s/it] {'loss': 9.2416, 'grad_norm': 3.857227966281962e-05, 'learning_rate': 1.0154495360662463, 'epoch': 0.71} + 71%|███████ | 370/520 [23:14<09:14, 3.70s/it] 71%|███████▏ | 371/520 [23:17<09:11, 3.70s/it] {'loss': 9.4894, 'grad_norm': 3.7558581898216754e-05, 'learning_rate': 1.0029401716424993, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:17<09:11, 3.70s/it] 72%|███████▏ | 372/520 [23:21<09:08, 3.71s/it] {'loss': 10.5895, 'grad_norm': 3.0533536885445356e-05, 'learning_rate': 0.990488974186306, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:21<09:08, 3.71s/it] 72%|███████▏ | 373/520 [23:25<09:05, 3.71s/it] {'loss': 10.3631, 'grad_norm': 3.891667317203511e-05, 'learning_rate': 0.9780964274781984, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:25<09:05, 3.71s/it] 72%|███████▏ | 374/520 [23:28<09:02, 3.71s/it] {'loss': 9.3314, 'grad_norm': 3.721638047462653e-05, 'learning_rate': 0.9657630130198819, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:28<09:02, 3.71s/it] 72%|███████▏ | 375/520 [23:32<08:58, 3.71s/it] {'loss': 8.9444, 'grad_norm': 3.073790743795655e-05, 'learning_rate': 0.9534892100155296, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:32<08:58, 3.71s/it] 72%|███████▏ | 376/520 [23:36<08:54, 3.71s/it] {'loss': 9.3915, 'grad_norm': 3.324452175355475e-05, 'learning_rate': 0.9412754953531663, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:36<08:54, 3.71s/it] 72%|███████▎ | 377/520 [23:40<08:49, 3.70s/it] {'loss': 9.4038, 'grad_norm': 3.286491854393151e-05, 'learning_rate': 0.9291223435861318, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:40<08:49, 3.70s/it] 73%|███████▎ | 378/520 [23:43<08:45, 3.70s/it] {'loss': 9.7242, 'grad_norm': 3.280874155407941e-05, 'learning_rate': 0.9170302269146507, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:43<08:45, 3.70s/it] 73%|███████▎ | 379/520 [23:47<08:41, 3.70s/it] {'loss': 9.6954, 'grad_norm': 3.458758224705916e-05, 'learning_rate': 0.9049996151674788, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:47<08:41, 3.70s/it] 73%|███████▎ | 380/520 [23:51<08:37, 3.70s/it] {'loss': 10.4209, 'grad_norm': 3.232553706054938e-05, 'learning_rate': 
0.8930309757836516, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:51<08:37, 3.70s/it] 73%|███████▎ | 381/520 [23:54<08:35, 3.71s/it] {'loss': 9.5953, 'grad_norm': 4.0370095188766735e-05, 'learning_rate': 0.8811247737943242, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:54<08:35, 3.71s/it] 73%|███████▎ | 382/520 [23:58<08:35, 3.73s/it] {'loss': 10.5537, 'grad_norm': 2.8542769766774376e-05, 'learning_rate': 0.869281471804698, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:58<08:35, 3.73s/it] 74%|███████▎ | 383/520 [24:02<08:34, 3.76s/it] {'loss': 9.1043, 'grad_norm': 4.8935791618895135e-05, 'learning_rate': 0.8575015299760491, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:02<08:34, 3.76s/it] 74%|███████▍ | 384/520 [24:06<08:32, 3.77s/it] {'loss': 11.565, 'grad_norm': 4.6585933055111664e-05, 'learning_rate': 0.845785406007852, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:06<08:32, 3.77s/it] 74%|███████▍ | 385/520 [24:10<08:30, 3.78s/it] {'loss': 9.4649, 'grad_norm': 5.326232334309041e-05, 'learning_rate': 0.8341335551199902, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:10<08:30, 3.78s/it] 74%|███████▍ | 386/520 [24:13<08:29, 3.81s/it] {'loss': 8.8758, 'grad_norm': 5.9144929340489053e-05, 'learning_rate': 0.8225464300350751, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:13<08:29, 3.81s/it] 74%|███████▍ | 387/520 [24:17<08:26, 3.81s/it] {'loss': 10.8874, 'grad_norm': 5.25033267636596e-05, 'learning_rate': 0.8110244809608493, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:17<08:26, 3.81s/it] 75%|███████▍ | 388/520 [24:21<08:23, 3.81s/it] {'loss': 9.1307, 'grad_norm': 5.3791723434420706e-05, 'learning_rate': 0.799568155572701, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:21<08:23, 3.81s/it] 75%|███████▍ | 389/520 [24:25<08:21, 3.83s/it] {'loss': 9.6297, 'grad_norm': 3.7878922236681105e-05, 'learning_rate': 0.7881778989962662, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:25<08:21, 3.83s/it] 75%|███████▌ | 390/520 [24:29<08:19, 3.84s/it] {'loss': 9.4132, 'grad_norm': 2.604062274325174e-05, 'learning_rate': 0.7768541537901325, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:29<08:19, 3.84s/it] 75%|███████▌ | 391/520 [24:33<08:16, 3.85s/it] {'loss': 9.8981, 'grad_norm': 3.510723456996342e-05, 'learning_rate': 0.7655973599286459, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:33<08:16, 3.85s/it] 75%|███████▌ | 392/520 [24:37<08:12, 3.85s/it] {'loss': 9.0901, 'grad_norm': 2.383707905831321e-05, 'learning_rate': 0.7544079547848181, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:37<08:12, 3.85s/it] 76%|███████▌ | 393/520 [24:40<08:04, 3.81s/it] {'loss': 10.0203, 'grad_norm': 3.304560552510377e-05, 'learning_rate': 0.7432863731133271, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:40<08:04, 3.81s/it] 76%|███████▌ | 394/520 [24:44<07:54, 3.77s/it] {'loss': 9.3571, 'grad_norm': 2.8778078533650674e-05, 'learning_rate': 0.7322330470336313, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:44<07:54, 3.77s/it] 76%|███████▌ | 395/520 [24:48<07:47, 3.74s/it] {'loss': 9.2556, 'grad_norm': 2.7271236929341977e-05, 'learning_rate': 0.7212484060131752, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:48<07:47, 3.74s/it] 76%|███████▌ | 396/520 [24:51<07:40, 3.71s/it] {'loss': 9.6026, 'grad_norm': 2.6504527013601866e-05, 'learning_rate': 0.7103328768507039, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:51<07:40, 3.71s/it] 76%|███████▋ | 397/520 [24:55<07:38, 3.72s/it] {'loss': 9.583, 'grad_norm': 2.5696172442074796e-05, 'learning_rate': 0.699486883659684, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:55<07:38, 3.72s/it] 77%|███████▋ | 398/520 [24:59<07:33, 
3.71s/it] {'loss': 9.7704, 'grad_norm': 3.45990544498144e-05, 'learning_rate': 0.6887108478518184, 'epoch': 0.77} + 77%|███████▋ | 398/520 [24:59<07:33, 3.71s/it] 77%|███████▋ | 399/520 [25:02<07:30, 3.72s/it] {'loss': 10.2963, 'grad_norm': 3.5435585382839785e-05, 'learning_rate': 0.6780051881206792, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:02<07:30, 3.72s/it] 77%|███████▋ | 400/520 [25:06<07:27, 3.73s/it] {'loss': 10.0823, 'grad_norm': 2.6883492651991677e-05, 'learning_rate': 0.6673703204254346, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:06<07:27, 3.73s/it] 77%|███████▋ | 401/520 [25:10<07:21, 3.71s/it] {'loss': 8.4461, 'grad_norm': 5.42723046743313e-05, 'learning_rate': 0.6568066579746901, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:10<07:21, 3.71s/it] 77%|███████▋ | 402/520 [25:13<07:15, 3.69s/it] {'loss': 8.9673, 'grad_norm': 4.022536244834619e-05, 'learning_rate': 0.6463146112104332, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:13<07:15, 3.69s/it] 78%|███████▊ | 403/520 [25:17<07:10, 3.68s/it] {'loss': 9.2008, 'grad_norm': 3.0964819160278174e-05, 'learning_rate': 0.6358945877920861, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:17<07:10, 3.68s/it] 78%|███████▊ | 404/520 [25:21<07:09, 3.70s/it] {'loss': 9.0917, 'grad_norm': 3.516573517913757e-05, 'learning_rate': 0.6255469925806643, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:21<07:09, 3.70s/it] 78%|███████▊ | 405/520 [25:25<07:05, 3.70s/it] {'loss': 9.9977, 'grad_norm': 4.7420687733975275e-05, 'learning_rate': 0.6152722276230504, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:25<07:05, 3.70s/it] 78%|███████▊ | 406/520 [25:28<07:01, 3.70s/it] {'loss': 10.0154, 'grad_norm': 6.139360501906995e-05, 'learning_rate': 0.6050706921363671, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:28<07:01, 3.70s/it] 78%|███████▊ | 407/520 [25:32<06:57, 3.69s/it] {'loss': 10.0758, 'grad_norm': 4.030878632432106e-05, 'learning_rate': 0.594942782492473, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:32<06:57, 3.69s/it] 78%|███████▊ | 408/520 [25:36<06:55, 3.71s/it] {'loss': 9.2201, 'grad_norm': 4.572696712822798e-05, 'learning_rate': 0.5848888922025552, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:36<06:55, 3.71s/it] 79%|███████▊ | 409/520 [25:39<06:50, 3.70s/it] {'loss': 10.0444, 'grad_norm': 4.9082255735034056e-05, 'learning_rate': 0.574909411901843, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:39<06:50, 3.70s/it] 79%|███████▉ | 410/520 [25:43<06:50, 3.73s/it] {'loss': 8.7269, 'grad_norm': 8.168419232213257e-05, 'learning_rate': 0.5650047293344316, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:43<06:50, 3.73s/it] 79%|███████▉ | 411/520 [25:47<06:51, 3.77s/it] {'loss': 9.6959, 'grad_norm': 4.300085963342657e-05, 'learning_rate': 0.5551752293382131, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:47<06:51, 3.77s/it] 79%|███████▉ | 412/520 [25:51<06:50, 3.80s/it] {'loss': 9.5552, 'grad_norm': 6.536354318807482e-05, 'learning_rate': 0.5454212938299254, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:51<06:50, 3.80s/it] 79%|███████▉ | 413/520 [25:55<06:49, 3.83s/it] {'loss': 10.7169, 'grad_norm': 5.885492892394626e-05, 'learning_rate': 0.5357433017903163, 'epoch': 0.79} + 79%|███████▉ | 413/520 [25:55<06:49, 3.83s/it] 80%|███████▉ | 414/520 [25:59<06:47, 3.85s/it] {'loss': 9.4397, 'grad_norm': 0.00025663114274654034, 'learning_rate': 0.5261416292494117, 'epoch': 0.8} + 80%|███████▉ | 414/520 [25:59<06:47, 3.85s/it] 80%|███████▉ | 415/520 [26:03<06:45, 3.86s/it] {'loss': 8.8503, 'grad_norm': 0.00011678748409701183, 'learning_rate': 0.5166166492719124, 'epoch': 0.8} + 80%|███████▉ 
| 415/520 [26:03<06:45, 3.86s/it] 80%|████████ | 416/520 [26:06<06:42, 3.87s/it] {'loss': 9.4713, 'grad_norm': 6.49887243260205e-05, 'learning_rate': 0.5071687319426945, 'epoch': 0.8} + 80%|████████ | 416/520 [26:06<06:42, 3.87s/it] 80%|████████ | 417/520 [26:10<06:38, 3.87s/it] {'loss': 9.4354, 'grad_norm': 4.278888714654034e-05, 'learning_rate': 0.49779824435243036, 'epoch': 0.8} + 80%|████████ | 417/520 [26:10<06:38, 3.87s/it] 80%|████████ | 418/520 [26:14<06:35, 3.88s/it] {'loss': 9.6324, 'grad_norm': 3.9300441367868075e-05, 'learning_rate': 0.4885055505833291, 'epoch': 0.8} + 80%|████████ | 418/520 [26:14<06:35, 3.88s/it] 81%|████████ | 419/520 [26:18<06:32, 3.89s/it] {'loss': 9.7949, 'grad_norm': 3.709532899955365e-05, 'learning_rate': 0.47929101169498695, 'epoch': 0.81} + 81%|████████ | 419/520 [26:18<06:32, 3.89s/it] 81%|████████ | 420/520 [26:22<06:28, 3.88s/it] {'loss': 9.1434, 'grad_norm': 3.6184003904067036e-05, 'learning_rate': 0.47015498571035874, 'epoch': 0.81} + 81%|████████ | 420/520 [26:22<06:28, 3.88s/it] 81%|████████ | 421/520 [26:26<06:24, 3.89s/it] {'loss': 8.9177, 'grad_norm': 5.742382015218607e-05, 'learning_rate': 0.4610978276018496, 'epoch': 0.81} + 81%|████████ | 421/520 [26:26<06:24, 3.89s/it] 81%|████████ | 422/520 [26:30<06:20, 3.88s/it] {'loss': 9.0559, 'grad_norm': 2.6443401662258376e-05, 'learning_rate': 0.4521198892775202, 'epoch': 0.81} + 81%|████████ | 422/520 [26:30<06:20, 3.88s/it] 81%|████████▏ | 423/520 [26:34<06:17, 3.89s/it] {'loss': 9.807, 'grad_norm': 3.982873737066699e-05, 'learning_rate': 0.4432215195674166, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:34<06:17, 3.89s/it] 82%|████████▏ | 424/520 [26:38<06:13, 3.89s/it] {'loss': 10.5668, 'grad_norm': 3.850487986633361e-05, 'learning_rate': 0.4344030642100133, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:38<06:13, 3.89s/it] 82%|████████▏ | 425/520 [26:42<06:10, 3.90s/it] {'loss': 9.0499, 'grad_norm': 3.766108875161311e-05, 'learning_rate': 0.42566486583878127, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:42<06:10, 3.90s/it] 82%|████████▏ | 426/520 [26:45<06:07, 3.91s/it] {'loss': 9.9137, 'grad_norm': 4.7377653886596545e-05, 'learning_rate': 0.41700726396887794, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:45<06:07, 3.91s/it] 82%|████████▏ | 427/520 [26:49<06:03, 3.91s/it] {'loss': 8.741, 'grad_norm': 3.901575085364036e-05, 'learning_rate': 0.4084305949839506, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:49<06:03, 3.91s/it] 82%|████████▏ | 428/520 [26:53<06:01, 3.92s/it] {'loss': 8.7645, 'grad_norm': 3.6147000712664185e-05, 'learning_rate': 0.3999351921230715, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:53<06:01, 3.92s/it] 82%|████████▎ | 429/520 [26:57<05:54, 3.89s/it] {'loss': 9.2674, 'grad_norm': 2.6685096221278645e-05, 'learning_rate': 0.39152138546778625, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:57<05:54, 3.89s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:01<05:43, 3.82s/it] {'loss': 8.5886, 'grad_norm': 4.722829985925959e-05, 'learning_rate': 0.3831895019292897, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:01<05:43, 3.82s/it] 83%|████████▎ | 431/520 [27:05<05:37, 3.79s/it] {'loss': 10.2845, 'grad_norm': 3.072007485286519e-05, 'learning_rate': 0.3749398652357272, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:05<05:37, 3.79s/it] 83%|████████▎ | 432/520 [27:08<05:32, 3.78s/it] {'loss': 8.9493, 'grad_norm': 2.987953005547688e-05, 'learning_rate': 0.366772795919611, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:08<05:32, 3.78s/it] 83%|████████▎ | 433/520 [27:12<05:27, 3.76s/it] {'loss': 9.3404, 'grad_norm': 2.6626417959363397e-05, 'learning_rate': 0.35868861130537166, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:12<05:27, 3.76s/it] 83%|████████▎ | 434/520 [27:16<05:22, 3.75s/it] {'loss': 8.2876, 'grad_norm': 5.171373506744004e-05, 'learning_rate': 0.35068762549702426, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:16<05:22, 3.75s/it] 84%|████████▎ | 435/520 [27:19<05:17, 3.73s/it] {'loss': 9.6587, 'grad_norm': 3.517697016273464e-05, 'learning_rate': 0.3427701493659674, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:19<05:17, 3.73s/it] 84%|████████▍ | 436/520 [27:23<05:13, 3.74s/it] {'loss': 9.0147, 'grad_norm': 2.998644411571731e-05, 'learning_rate': 0.33493649053890323, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:23<05:13, 3.74s/it] 84%|████████▍ | 437/520 [27:27<05:09, 3.73s/it] {'loss': 9.9418, 'grad_norm': 3.823937595097495e-05, 'learning_rate': 0.327186953385884, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:27<05:09, 3.73s/it] 84%|████████▍ | 438/520 [27:31<05:05, 3.72s/it] {'loss': 8.6548, 'grad_norm': 3.663134019726962e-05, 'learning_rate': 0.3195218390084867, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:31<05:05, 3.72s/it] 84%|████████▍ | 439/520 [27:34<05:03, 3.75s/it] {'loss': 9.8412, 'grad_norm': 4.333124527626294e-05, 'learning_rate': 0.3119414452281158, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:34<05:03, 3.75s/it] 85%|████████▍ | 440/520 [27:38<05:03, 3.80s/it] {'loss': 9.3779, 'grad_norm': 2.6822658561747377e-05, 'learning_rate': 0.30444606657442835, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:38<05:03, 3.80s/it] 85%|████████▍ | 441/520 [27:42<05:03, 3.85s/it] {'loss': 10.18, 'grad_norm': 3.214990140595077e-05, 'learning_rate': 0.297035994273894, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:42<05:03, 3.85s/it] 85%|████████▌ | 442/520 [27:46<05:01, 3.86s/it] {'loss': 9.3488, 'grad_norm': 2.817774954458384e-05, 'learning_rate': 0.28971151623847585, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:46<05:01, 3.86s/it] 85%|████████▌ | 443/520 [27:50<04:56, 3.86s/it] {'loss': 9.4029, 'grad_norm': 3.1124323218945724e-05, 'learning_rate': 0.2824729170544457, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:50<04:56, 3.86s/it] 85%|████████▌ | 444/520 [27:54<04:54, 3.87s/it] {'loss': 9.2428, 'grad_norm': 2.930569617447291e-05, 'learning_rate': 0.27532047797132864, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:54<04:54, 3.87s/it] 86%|████████▌ | 445/520 [27:58<04:50, 3.87s/it] {'loss': 8.9908, 'grad_norm': 3.423509303012978e-05, 'learning_rate': 0.2682544768909717, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:58<04:50, 3.87s/it] 86%|████████▌ | 446/520 [28:02<04:46, 3.87s/it] {'loss': 10.3592, 'grad_norm': 2.8847503396573368e-05, 'learning_rate': 0.2612751883567477, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:02<04:46, 3.87s/it] 86%|████████▌ | 
447/520 [28:06<04:42, 3.87s/it] {'loss': 9.8137, 'grad_norm': 3.4273314893288184e-05, 'learning_rate': 0.2543828835428899, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:06<04:42, 3.87s/it] 86%|████████▌ | 448/520 [28:09<04:39, 3.88s/it] {'loss': 9.1384, 'grad_norm': 2.7914453212537026e-05, 'learning_rate': 0.2475778302439524, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:09<04:39, 3.88s/it] 86%|████████▋ | 449/520 [28:13<04:34, 3.86s/it] {'loss': 10.425, 'grad_norm': 2.643733165533024e-05, 'learning_rate': 0.2408602928644088, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:13<04:34, 3.86s/it] 87%|████████▋ | 450/520 [28:17<04:27, 3.82s/it] {'loss': 9.606, 'grad_norm': 3.134868171069947e-05, 'learning_rate': 0.23423053240837516, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:17<04:27, 3.82s/it] 87%|████████▋ | 451/520 [28:21<04:20, 3.78s/it] {'loss': 9.7618, 'grad_norm': 3.1141316596374895e-05, 'learning_rate': 0.22768880646947265, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:21<04:20, 3.78s/it] 87%|████████▋ | 452/520 [28:24<04:14, 3.75s/it] {'loss': 10.2594, 'grad_norm': 3.388918418054495e-05, 'learning_rate': 0.22123536922081716, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:24<04:14, 3.75s/it] 87%|████████▋ | 453/520 [28:28<04:10, 3.73s/it] {'loss': 10.4707, 'grad_norm': 3.0475056881020102e-05, 'learning_rate': 0.21487047140514248, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:28<04:10, 3.73s/it] 87%|████████▋ | 454/520 [28:32<04:05, 3.71s/it] {'loss': 9.1093, 'grad_norm': 3.0275307301577408e-05, 'learning_rate': 0.2085943603250595, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:32<04:05, 3.71s/it] 88%|████████▊ | 455/520 [28:35<04:01, 3.71s/it] {'loss': 9.4753, 'grad_norm': 3.438197771999406e-05, 'learning_rate': 0.20240727983344836, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:35<04:01, 3.71s/it] 88%|████████▊ | 456/520 [28:39<03:56, 3.70s/it] {'loss': 9.048, 'grad_norm': 3.5180880331088393e-05, 'learning_rate': 0.19630947032398066, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:39<03:56, 3.70s/it] 88%|████████▊ | 457/520 [28:43<03:53, 3.70s/it] {'loss': 11.2907, 'grad_norm': 4.103903495642337e-05, 'learning_rate': 0.19030116872178315, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:43<03:53, 3.70s/it] 88%|████████▊ | 458/520 [28:46<03:49, 3.70s/it] {'loss': 9.8334, 'grad_norm': 3.664913720660747e-05, 'learning_rate': 0.18438260847422838, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:46<03:49, 3.70s/it] 88%|████████▊ | 459/520 [28:50<03:45, 3.70s/it] {'loss': 9.5606, 'grad_norm': 3.403664337694726e-05, 'learning_rate': 0.17855401954186612, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:50<03:45, 3.70s/it] 88%|████████▊ | 460/520 [28:54<03:41, 3.70s/it] {'loss': 9.0118, 'grad_norm': 3.4657919342457966e-05, 'learning_rate': 0.17281562838948966, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:54<03:41, 3.70s/it] 89%|████████▊ | 461/520 [28:58<03:38, 3.70s/it] {'loss': 11.2489, 'grad_norm': 4.365001829753247e-05, 'learning_rate': 0.16716765797733374, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:58<03:38, 3.70s/it] 89%|████████▉ | 462/520 [29:01<03:34, 3.69s/it] {'loss': 10.804, 'grad_norm': 3.352672334760117e-05, 'learning_rate': 0.16161032775241502, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:01<03:34, 3.69s/it] 89%|████████▉ | 463/520 [29:05<03:30, 3.70s/it] {'loss': 9.2123, 'grad_norm': 3.88521232653285e-05, 'learning_rate': 0.15614385364000227, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:05<03:30, 3.70s/it] 89%|████████▉ | 464/520 [29:09<03:27, 3.70s/it] {'loss': 9.8961, 'grad_norm': 
3.5581220527681806e-05, 'learning_rate': 0.1507684480352292, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:09<03:27, 3.70s/it] 89%|████████▉ | 465/520 [29:12<03:24, 3.71s/it] {'loss': 10.2629, 'grad_norm': 5.2097131290864295e-05, 'learning_rate': 0.14548431979484133, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:12<03:24, 3.71s/it] 90%|████████▉ | 466/520 [29:16<03:21, 3.73s/it] {'loss': 9.3136, 'grad_norm': 3.7347037468157476e-05, 'learning_rate': 0.14029167422908106, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:16<03:21, 3.73s/it] 90%|████████▉ | 467/520 [29:20<03:19, 3.76s/it] {'loss': 10.4321, 'grad_norm': 3.015201588723059e-05, 'learning_rate': 0.13519071309370995, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:20<03:19, 3.76s/it] 90%|█████████ | 468/520 [29:24<03:15, 3.76s/it] {'loss': 9.7547, 'grad_norm': 3.703575709045783e-05, 'learning_rate': 0.13018163458217075, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:24<03:15, 3.76s/it] 90%|█████████ | 469/520 [29:28<03:12, 3.78s/it] {'loss': 9.8927, 'grad_norm': 4.017386095507236e-05, 'learning_rate': 0.125264633317885, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:28<03:12, 3.78s/it] 90%|█████████ | 470/520 [29:31<03:10, 3.81s/it] {'loss': 9.33, 'grad_norm': 2.924085181532292e-05, 'learning_rate': 0.1204399003466941, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:31<03:10, 3.81s/it] 91%|█████████ | 471/520 [29:35<03:08, 3.84s/it] {'loss': 10.0544, 'grad_norm': 4.833660213775372e-05, 'learning_rate': 0.11570762312943295, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:35<03:08, 3.84s/it] 91%|█████████ | 472/520 [29:39<03:05, 3.86s/it] {'loss': 9.2853, 'grad_norm': 3.0678264187709666e-05, 'learning_rate': 0.11106798553464803, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:39<03:05, 3.86s/it] 91%|█████████ | 473/520 [29:43<03:01, 3.86s/it] {'loss': 9.3639, 'grad_norm': 3.031672163118172e-05, 'learning_rate': 0.10652116783145482, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:43<03:01, 3.86s/it] 91%|█████████ | 474/520 [29:47<02:58, 3.87s/it] {'loss': 10.754, 'grad_norm': 3.6386471425473244e-05, 'learning_rate': 0.10206734668253059, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:47<02:58, 3.87s/it] 91%|█████████▏| 475/520 [29:51<02:53, 3.86s/it] {'loss': 9.7213, 'grad_norm': 4.2389017181427935e-05, 'learning_rate': 0.09770669513725128, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:51<02:53, 3.86s/it] 92%|█████████▏| 476/520 [29:55<02:50, 3.87s/it] {'loss': 9.54, 'grad_norm': 3.402809045899137e-05, 'learning_rate': 0.09343938262496992, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:55<02:50, 3.87s/it] 92%|█████████▏| 477/520 [29:59<02:46, 3.87s/it] {'loss': 9.2039, 'grad_norm': 3.289500247925625e-05, 'learning_rate': 0.08926557494843085, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:59<02:46, 3.87s/it] 92%|█████████▏| 478/520 [30:03<02:43, 3.89s/it] {'loss': 9.0448, 'grad_norm': 3.384043712056123e-05, 'learning_rate': 0.0851854342773295, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:03<02:43, 3.89s/it] 92%|█████████▏| 479/520 [30:06<02:38, 3.85s/it] {'loss': 10.5453, 'grad_norm': 2.748099701006392e-05, 'learning_rate': 0.08119911914200972, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:06<02:38, 3.85s/it] 92%|█████████▏| 480/520 [30:10<02:32, 3.81s/it] {'loss': 10.4755, 'grad_norm': 2.8472097135125814e-05, 'learning_rate': 0.07730678442730538, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:10<02:32, 3.81s/it] 92%|█████████▎| 481/520 [30:14<02:27, 3.79s/it] {'loss': 10.2749, 'grad_norm': 2.5427708433801777e-05, 'learning_rate': 0.07350858136652261, 'epoch': 0.93} + 
92%|█████████▎| 481/520 [30:14<02:27, 3.79s/it] 93%|█████████▎| 482/520 [30:17<02:22, 3.76s/it] {'loss': 10.7803, 'grad_norm': 2.7950506833244914e-05, 'learning_rate': 0.06980465753556375, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:17<02:22, 3.76s/it] 93%|█████████▎| 483/520 [30:21<02:18, 3.74s/it] {'loss': 9.7132, 'grad_norm': 3.217985323738643e-05, 'learning_rate': 0.06619515684719163, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:21<02:18, 3.74s/it] 93%|█████████▎| 484/520 [30:25<02:15, 3.77s/it] {'loss': 9.7571, 'grad_norm': 3.1014458550906025e-05, 'learning_rate': 0.06268021954544095, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:25<02:15, 3.77s/it] 93%|█████████▎| 485/520 [30:29<02:11, 3.76s/it] {'loss': 9.1912, 'grad_norm': 2.8427911600625665e-05, 'learning_rate': 0.059259982200166594, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:29<02:11, 3.76s/it] 93%|█████████▎| 486/520 [30:32<02:06, 3.73s/it] {'loss': 9.622, 'grad_norm': 4.021677072794444e-05, 'learning_rate': 0.05593457770173865, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:32<02:06, 3.73s/it] 94%|█████████▎| 487/520 [30:36<02:02, 3.72s/it] {'loss': 8.8659, 'grad_norm': 3.4287975617567296e-05, 'learning_rate': 0.05270413525587908, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:36<02:02, 3.72s/it] 94%|█████████▍| 488/520 [30:40<01:58, 3.72s/it] {'loss': 9.155, 'grad_norm': 2.734331932251048e-05, 'learning_rate': 0.04956878037864043, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:40<01:58, 3.72s/it] 94%|█████████▍| 489/520 [30:44<01:55, 3.73s/it] {'loss': 10.1948, 'grad_norm': 2.9988589994814555e-05, 'learning_rate': 0.04652863489153086, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:44<01:55, 3.73s/it] 94%|█████████▍| 490/520 [30:48<01:53, 3.80s/it] {'loss': 9.2814, 'grad_norm': 2.5338862459251626e-05, 'learning_rate': 0.04358381691677932, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:48<01:53, 3.80s/it] 94%|█████████▍| 491/520 [30:51<01:50, 3.81s/it] {'loss': 9.2208, 'grad_norm': 2.632575318677906e-05, 'learning_rate': 0.04073444087274669, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:51<01:50, 3.81s/it] 95%|█████████▍| 492/520 [30:55<01:47, 3.83s/it] {'loss': 9.4756, 'grad_norm': 2.9900496286555866e-05, 'learning_rate': 0.03798061746947995, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:55<01:47, 3.83s/it] 95%|█████████▍| 493/520 [30:59<01:43, 3.83s/it] {'loss': 10.6646, 'grad_norm': 2.9390294531969836e-05, 'learning_rate': 0.035322453704410284, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:59<01:43, 3.83s/it] 95%|█████████▌| 494/520 [31:03<01:39, 3.84s/it] {'loss': 9.5444, 'grad_norm': 2.8557844819300326e-05, 'learning_rate': 0.032760052858197275, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:03<01:39, 3.84s/it] 95%|█████████▌| 495/520 [31:07<01:36, 3.86s/it] {'loss': 8.7402, 'grad_norm': 3.851027294382253e-05, 'learning_rate': 0.030293514490713214, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:07<01:36, 3.86s/it] 95%|█████████▌| 496/520 [31:11<01:32, 3.85s/it] {'loss': 9.0091, 'grad_norm': 2.945083367979889e-05, 'learning_rate': 0.027922934437178693, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:11<01:32, 3.85s/it] 96%|█████████▌| 497/520 [31:15<01:28, 3.85s/it] {'loss': 10.0529, 'grad_norm': 3.1408027079501525e-05, 'learning_rate': 0.025648404804435032, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:15<01:28, 3.85s/it] 96%|█████████▌| 498/520 [31:18<01:24, 3.86s/it] {'loss': 9.1009, 'grad_norm': 2.9752069907128628e-05, 'learning_rate': 0.023470013967367975, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:18<01:24, 3.86s/it] 96%|█████████▌| 
499/520 [31:22<01:21, 3.87s/it] {'loss': 10.6679, 'grad_norm': 3.2009728145409695e-05, 'learning_rate': 0.021387846565474045, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:22<01:21, 3.87s/it] 96%|█████████▌| 500/520 [31:26<01:17, 3.86s/it] {'loss': 9.9656, 'grad_norm': 3.864853750188321e-05, 'learning_rate': 0.01940198349956984, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:26<01:17, 3.86s/it] 96%|█████████▋| 501/520 [31:30<01:13, 3.86s/it] {'loss': 10.3459, 'grad_norm': 2.5702943133412343e-05, 'learning_rate': 0.017512501928650948, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:30<01:13, 3.86s/it] 97%|█████████▋| 502/520 [31:34<01:09, 3.85s/it] {'loss': 9.397, 'grad_norm': 3.219598048152216e-05, 'learning_rate': 0.01571947526689349, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:34<01:09, 3.85s/it] 97%|█████████▋| 503/520 [31:38<01:05, 3.87s/it] {'loss': 10.3002, 'grad_norm': 2.7189081051317937e-05, 'learning_rate': 0.01402297318080059, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:38<01:05, 3.87s/it] 97%|█████████▋| 504/520 [31:42<01:01, 3.87s/it] {'loss': 9.7092, 'grad_norm': 3.661488153735965e-05, 'learning_rate': 0.012423061586496476, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:42<01:01, 3.87s/it] 97%|█████████▋| 505/520 [31:45<00:57, 3.86s/it] {'loss': 9.5818, 'grad_norm': 2.9793389268747953e-05, 'learning_rate': 0.010919802647165466, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:45<00:57, 3.86s/it] 97%|█████████▋| 506/520 [31:49<00:53, 3.85s/it] {'loss': 9.0345, 'grad_norm': 2.7697909355546334e-05, 'learning_rate': 0.009513254770636137, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:49<00:53, 3.85s/it] 98%|█████████▊| 507/520 [31:53<00:50, 3.86s/it] {'loss': 11.1004, 'grad_norm': 4.2961979205020925e-05, 'learning_rate': 0.008203472607112294, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:53<00:50, 3.86s/it] 98%|█████████▊| 508/520 [31:57<00:46, 3.85s/it] {'loss': 9.7659, 'grad_norm': 3.6487046282117175e-05, 'learning_rate': 0.006990507047049677, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:57<00:46, 3.85s/it] 98%|█████████▊| 509/520 [32:01<00:42, 3.85s/it] {'loss': 9.1899, 'grad_norm': 3.3367404550778355e-05, 'learning_rate': 0.005874405219177814, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:01<00:42, 3.85s/it] 98%|█████████▊| 510/520 [32:05<00:38, 3.86s/it] {'loss': 9.2329, 'grad_norm': 2.569136300116037e-05, 'learning_rate': 0.004855210488670381, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:05<00:38, 3.86s/it] 98%|█████████▊| 511/520 [32:09<00:34, 3.85s/it] {'loss': 9.3606, 'grad_norm': 2.7396803578143253e-05, 'learning_rate': 0.0039329624554584885, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:09<00:34, 3.85s/it] 98%|█████████▊| 512/520 [32:12<00:30, 3.85s/it] {'loss': 8.8919, 'grad_norm': 3.412127634526295e-05, 'learning_rate': 0.003107696952694139, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:12<00:30, 3.85s/it] 99%|█████████▊| 513/520 [32:16<00:26, 3.86s/it] {'loss': 9.454, 'grad_norm': 2.5935246519619315e-05, 'learning_rate': 0.0023794460453555044, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:16<00:26, 3.86s/it] 99%|█████████▉| 514/520 [32:20<00:23, 3.86s/it] {'loss': 9.4645, 'grad_norm': 2.876433367812725e-05, 'learning_rate': 0.0017482380290034794, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:20<00:23, 3.86s/it] 99%|█████████▉| 515/520 [32:24<00:19, 3.86s/it] {'loss': 9.8371, 'grad_norm': 3.9377799356108336e-05, 'learning_rate': 0.0012140974286808937, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:24<00:19, 3.86s/it] 99%|█████████▉| 516/520 [32:28<00:15, 3.86s/it] {'loss': 9.4117, 
'grad_norm': 2.6574163092242395e-05, 'learning_rate': 0.0007770449979593863, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:28<00:15, 3.86s/it] 99%|█████████▉| 517/520 [32:32<00:11, 3.86s/it] {'loss': 10.5282, 'grad_norm': 2.6233855873553935e-05, 'learning_rate': 0.0004370977181339386, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:32<00:11, 3.86s/it] 100%|█████████▉| 518/520 [32:36<00:07, 3.85s/it] {'loss': 9.3672, 'grad_norm': 2.493403862583992e-05, 'learning_rate': 0.00019426879756284654, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:36<00:07, 3.85s/it] 100%|█████████▉| 519/520 [32:39<00:03, 3.85s/it] {'loss': 10.3114, 'grad_norm': 2.6146756003675925e-05, 'learning_rate': 4.856767115452021e-05, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:39<00:03, 3.85s/it] 100%|██████████| 520/520 [32:44<00:00, 4.09s/it] {'loss': 10.6558, 'grad_norm': 3.3860937894489155e-05, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:44<00:00, 4.09s/it] {'train_runtime': 1964.563, 'train_samples_per_second': 33.865, 'train_steps_per_second': 0.265, 'train_loss': 9.685497407729809, 'epoch': 1.0} + 100%|██████████| 520/520 [32:44<00:00, 4.09s/it] 100%|██████████| 520/520 [32:44<00:00, 3.78s/it] +[2025-10-10 08:05:41,440] [INFO] [launch.py:348:main] Process 1832688 exits successfully. +[2025-10-10 08:05:41,440] [INFO] [launch.py:348:main] Process 1832685 exits successfully. +[2025-10-10 08:05:41,441] [INFO] [launch.py:348:main] Process 1832684 exits successfully. +[2025-10-10 08:05:42,442] [INFO] [launch.py:348:main] Process 1832687 exits successfully. +[2025-10-10 08:05:42,443] [INFO] [launch.py:348:main] Process 1832686 exits successfully. +[2025-10-10 08:05:42,444] [INFO] [launch.py:348:main] Process 1832682 exits successfully. +[2025-10-10 08:05:42,444] [INFO] [launch.py:348:main] Process 1832683 exits successfully. +[2025-10-10 08:05:46,449] [INFO] [launch.py:348:main] Process 1832681 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251010_072428.log +Timestamp: 2025-10-10 08:05:48 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251010_102903.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251010_102903.log new file mode 100644 index 0000000000000000000000000000000000000000..893c17ac4bc7e8575650c197c85b604799bbe761 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251010_102903.log @@ -0,0 +1,1778 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251010_102903.log +Timestamp: 2025-10-10 10:29:03 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
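[Editor's note] The trainer summary above ({'train_runtime': 1964.563, 'train_samples_per_second': 33.865, 'train_steps_per_second': 0.265, ...}) can be sanity-checked against the launch flags. A back-of-the-envelope sketch, assuming the usual effective-batch rule of per-device batch × gradient-accumulation × world size (4 × 4 × the 8 localhost ranks shown in the DeepSpeed world info):

```python
# Rough consistency check of the trainer summary above; illustrative only.
per_device_batch = 4   # --per_device_train_batch_size
grad_accum = 4         # --gradient_accumulation_steps
world_size = 8         # localhost ranks [0..7] in the DeepSpeed world info

effective_batch = per_device_batch * grad_accum * world_size
print(effective_batch)           # 128 samples per optimizer step
print(0.265 * effective_batch)   # ~33.9 samples/s vs. the reported 33.865
print(520 * effective_batch)     # 66,560 samples ~= 0.1 * ~665k
```

The last line is consistent with --train_data_ratio 0.1 applied to the ~665k-sample llava_v1_5_mix665k.json file, which is also why each of these runs stops at 520 optimizer steps.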
+ import pynvml # type: ignore[import] +[2025-10-10 10:29:05,881] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 10:29:08,528] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 10:29:08,529] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 5e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 5e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 10:29:11,144] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 10:29:12,236] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 10:29:12,236] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 10:29:12,236] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 10:29:12,236] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 10:29:12,236] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 10:29:12,236] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 10:29:12,236] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 10:29:12,238] [INFO] [launch.py:253:main] process 1986915 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 10:29:12,240] [INFO] 
[launch.py:253:main] process 1986916 spawned with command: [identical to process 1986915 (rank 0) but with '--local_rank=1']
+[2025-10-10 10:29:12,243] [INFO] [launch.py:253:main] process 1986917 spawned with command: [identical command with '--local_rank=2']
+[2025-10-10 10:29:12,245] [INFO] [launch.py:253:main] process 1986918 spawned with command: [identical command with '--local_rank=3']
+[2025-10-10 10:29:12,247] [INFO] [launch.py:253:main] process 1986919 spawned with command: [identical command with '--local_rank=4']
+[2025-10-10 10:29:12,249] [INFO] [launch.py:253:main] process 1986920 spawned with command: [identical command with '--local_rank=5']
+[2025-10-10 10:29:12,251] [INFO] [launch.py:253:main] process 1986921 spawned with command: [identical command with '--local_rank=6']
+[2025-10-10 10:29:12,253] [INFO] [launch.py:253:main] process 1986922 spawned with command: [identical command with '--local_rank=7']
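Aside: the mask-specific flags in these commands (--mask_type_* soft, --init_mean_* 3.0, --temperature_attn/mlp/connector 0.5, --backward_type_* normal) point to a temperature-scaled sigmoid gate over the masked modules. The repository's actual module is not shown in these logs, so the following is only a minimal sketch under that assumption; the class name and shapes are hypothetical:

import torch
import torch.nn as nn

class SoftMask(nn.Module):
    """Hypothetical temperature-scaled soft mask (sketch, not the repo's code)."""

    def __init__(self, shape: tuple, init_mean: float = 3.0, temperature: float = 0.5):
        super().__init__()
        # Mask logits start at init_mean; sigmoid(3.0 / 0.5) ~= 0.998, so the
        # subnetwork would begin almost dense and training carves it down.
        self.scores = nn.Parameter(torch.full(shape, init_mean))
        self.temperature = temperature

    def forward(self, weight: torch.Tensor) -> torch.Tensor:
        # 'soft' mask: gate values stay in (0, 1) rather than being thresholded.
        return weight * torch.sigmoid(self.scores / self.temperature)

# e.g. gating a linear layer's weight: masked_w = SoftMask(layer.weight.shape)(layer.weight)

Under this reading, lowering the temperature sharpens the gate toward a hard 0/1 mask, which would be consistent with these runs ablating (init_mean, temperature, lr) triples such as 1.0/0.3/2e-1 and 3.0/0.5/5e-1.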
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-10 10:29:18,901] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:29:19,058] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:29:19,228] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:29:19,278] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:29:19,287] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:29:19,287] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:29:19,310] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:29:19,311] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:29:19,311] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-10 10:29:19,460] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:29:19,460] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-10 10:29:19,629] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:29:19,684] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:29:19,685] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:29:19,689] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:29:19,714] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-10 10:29:19,717] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:1986915:1986915 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986915:1986915 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986915:1986915 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1986915:1986915 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1986915:1986915 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1986915:1986915 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Using network Socket
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly.
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+ywang29-vrdb-test1-worker-0:1986916:1986916 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1986916:1986916 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986916:1986916 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986916:1986916 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1986916:1986916 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1986916:1986916 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1986919:1986919 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1986919:1986919 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986919:1986919 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986919:1986919 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1986919:1986919 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1986919:1986919 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1986922:1986922 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1986922:1986922 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986922:1986922 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986922:1986922 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1986922:1986922 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1986922:1986922 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Using network Socket
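These NCCL lines record the usual single-node bootstrap: eight local ranks, socket transport over eth0 because no InfiniBand device was found, with DeepSpeed initializing the nccl backend through torch.distributed. A minimal, purely illustrative sketch of the equivalent per-rank setup (deepspeed.launcher does this internally; LOCAL_RANK, MASTER_ADDR, etc. are set by the launcher):

import os
import torch
import torch.distributed as dist

def init_rank() -> None:
    local_rank = int(os.environ["LOCAL_RANK"])  # one process per GPU, set by the launcher
    torch.cuda.set_device(local_rank)
    # NCCL_SOCKET_IFNAME=eth pins NCCL to the eth* interfaces, matching
    # "NET/Socket : Using [0]eth0" above when IB is unavailable.
    os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")
    dist.init_process_group(backend="nccl")  # reads MASTER_ADDR/PORT, RANK, WORLD_SIZE from env

if __name__ == "__main__":
    init_rank()
    print(f"rank {dist.get_rank()} / {dist.get_world_size()} ready")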
+ywang29-vrdb-test1-worker-0:1986918:1986918 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1986918:1986918 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986918:1986918 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986918:1986918 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1986918:1986918 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1986918:1986918 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1986921:1986921 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1986921:1986921 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986921:1986921 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986921:1986921 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1986921:1986921 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1986921:1986921 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1986920:1986920 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1986920:1986920 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986920:1986920 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986920:1986920 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1986920:1986920 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1986920:1986920 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Using network Socket
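The per-step metric dicts recorded earlier in these logs (e.g. {'loss': 9.9656, 'grad_norm': ..., 'learning_rate': ..., 'epoch': 0.96}) are plain Python literals, so they can be recovered from the raw files for plotting or comparison across ablations. A small sketch; the file name is hypothetical:

import ast
import re
from pathlib import Path

# Matches the flat metric dicts emitted at --logging_steps 1; they contain
# no nested braces, so a non-greedy scan up to the closing brace suffices.
METRIC_RE = re.compile(r"\{'loss':[^}]*\}")

def read_metrics(path: str) -> list:
    text = Path(path).read_text(errors="replace")
    # ast.literal_eval safely parses the Python-dict-style log entries.
    return [ast.literal_eval(m.group(0)) for m in METRIC_RE.finditer(text)]

if __name__ == "__main__":
    steps = read_metrics("logs_oct10/example.log")  # hypothetical filename
    if steps:
        print(f"{len(steps)} logged steps, final loss {steps[-1]['loss']}")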
+ywang29-vrdb-test1-worker-0:1986917:1986917 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1986917:1986917 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1986917:1986917 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1986917:1986917 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1986917:1986917 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1986917:1986917 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO ncclCommInitRank comm 0x5622a8a43390 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xe8c188da34a306b - Init START +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO ncclCommInitRank comm 0x562a1b431ff0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xe8c188da34a306b - Init START +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO ncclCommInitRank comm 0x5645da070890 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xe8c188da34a306b - Init START +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO ncclCommInitRank comm 0x55a0fefaad60 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xe8c188da34a306b - Init START +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO ncclCommInitRank comm 0x55cce65d8880 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xe8c188da34a306b - Init START +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO ncclCommInitRank comm 0x55b6e0f3d3b0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xe8c188da34a306b - Init START +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO ncclCommInitRank comm 0x55cb62fe9830 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xe8c188da34a306b - Init START +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO ncclCommInitRank comm 0x55cac52bcc40 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xe8c188da34a306b - Init START +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] 
NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO comm 0x55cce65d8880 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO comm 0x55cb62fe9830 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO comm 0x562a1b431ff0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO comm 0x55a0fefaad60 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO comm 0x5645da070890 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO comm 0x55cac52bcc40 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO comm 0x5622a8a43390 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO comm 0x55b6e0f3d3b0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 
4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 
3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL 
INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL 
INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL 
INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL 
INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL 
INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO 
threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1986920:1988541 [5] NCCL INFO ncclCommInitRank comm 0x55cac52bcc40 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xe8c188da34a306b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1986921:1988523 [6] NCCL INFO ncclCommInitRank comm 0x55cce65d8880 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xe8c188da34a306b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1986922:1988509 [7] NCCL INFO ncclCommInitRank comm 0x55cb62fe9830 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xe8c188da34a306b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1986919:1988504 [4] NCCL INFO ncclCommInitRank comm 0x5645da070890 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xe8c188da34a306b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1986916:1988503 [1] NCCL INFO ncclCommInitRank comm 0x55b6e0f3d3b0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xe8c188da34a306b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1986918:1988520 [3] NCCL INFO ncclCommInitRank comm 0x55a0fefaad60 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xe8c188da34a306b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1986917:1988542 [2] NCCL INFO ncclCommInitRank comm 0x5622a8a43390 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xe8c188da34a306b - Init COMPLETE +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1986915:1988502 [0] NCCL INFO ncclCommInitRank comm 0x562a1b431ff0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xe8c188da34a306b - Init COMPLETE +[2025-10-10 10:30:04,284] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 
'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 
'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-10 16:20:44,239] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training 
init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init 
connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-10 16:21:02,190 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-10 16:21:02,195 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%|          | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15]
5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 
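Each NCCL "Trees" entry in these lines reads as children->rank->parent, with -1 marking an absent link, so the eight GPUs form the simple chain 0->1->...->7 that matches the ring order printed around it. A small parsing sketch under that assumed layout:

def parse_nccl_tree(entry: str) -> dict:
    # Assumes NCCL's printed form "c0/c1/c2->rank->parent", e.g. "5/-1/-1->4->3".
    children, rank, parent = entry.split("->")
    return {
        "rank": int(rank),
        "parent": int(parent),  # -1 marks the root rank
        "children": [int(c) for c in children.split("/") if c != "-1"],
    }

print(parse_nccl_tree("5/-1/-1->4->3"))  # rank 4's entry above -> {'rank': 4, 'parent': 3, 'children': [5]}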
+ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 02/0 
: 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL 
INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL 
INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986919:1998278 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986921:1998279 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986916:1998281 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986918:1998282 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986917:1998277 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986920:1998280 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986922:1998276 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1986915:1998275 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+[NCCL ring/tree wiring elided: ranks 0-7 report 'Connected all rings' and 'Connected all trees', with channels 00/0 through 23/0 connected pairwise via P2P/CUMEM/read; every rank logs threadThresholds 8/8/64 | 64/8/64 | 512 | 512 and 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer] +[ncclCommInitRank - Init COMPLETE reported by all 8 ranks, nranks 8, commId 0x366952fee6f42ea9] + 0%| | 1/520 [00:14<2:02:22, 14.15s/it] {'loss': 2.0453, 'grad_norm': 0.004834457161693542, 'learning_rate': 0.03125, 'epoch': 0.0} + 0%| | 1/520 [00:14<2:02:22, 14.15s/it] 0%| | 2/520 [00:17<1:09:05, 8.00s/it] {'loss': 2.0549, 'grad_norm': 0.005249585878964149, 'learning_rate': 0.0625, 'epoch': 0.0} + 0%| | 2/520 [00:17<1:09:05, 8.00s/it] \ No newline at end of file diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_060221.log
b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_060221.log new file mode 100644 index 0000000000000000000000000000000000000000..7fc6542c9cd618297d0ca2b7a384a9a547870ac7 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_060221.log @@ -0,0 +1,7 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_060221.log +Timestamp: 2025-10-10 06:02:21 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 06:02:24,057] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_080549.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_080549.log new file mode 100644 index 0000000000000000000000000000000000000000..26c0a6bd9aef26a42bdb1392f78c99527c9d80a8 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_080549.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_080549.log +Timestamp: 2025-10-10 08:05:49 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 08:05:51,709] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:05:54,372] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. 
+[2025-10-10 08:05:54,373] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 7 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 7 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 08:05:57,019] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:05:58,069] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 08:05:58,070] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 08:05:58,070] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 08:05:58,070] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 08:05:58,070] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 08:05:58,070] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 08:05:58,070] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 08:05:58,072] [INFO] [launch.py:253:main] process 1897680 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:05:58,074] [INFO] [launch.py:253:main] 
process 1897681 spawned with command: [identical to the process-1897680 command above except '--local_rank=1'] +[launch.py:253:main spawn records for processes 1897682-1897687 (08:05:58,076-08:05:58,087) elided: same command with '--local_rank=2' through '--local_rank=7'] +[the FutureWarning below is emitted once per spawned rank] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead.
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[real_accelerator.py:191:get_accelerator 'Setting ds_accelerator to cuda (auto detect)' logged by each of the 8 ranks between 08:06:04,833 and 08:06:05,140] +[comm.py:637:init_distributed 'cdb=None' logged by each of the 8 ranks between 08:06:05,245 and 08:06:05,593] +[2025-10-10 08:06:05,593] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +[the 'Apply masks' line and the huggingface_hub resume_download FutureWarning repeat once per rank; one instance shown below] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +[remaining per-rank 'Apply masks' / resume_download FutureWarning repetitions elided] +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, +
"vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1897680:1897680 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1897680:1897680 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1897680:1897680 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1897680:1897680 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1897680:1897680 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1897680:1897680 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Using network Socket
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:1897682:1897682 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1897682:1897682 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897682:1897682 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897682:1897682 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1897682:1897682 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1897682:1897682 [2] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO Using network Socket
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
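The repeated `TypedStorage is deprecated` warning is raised inside torch internals, not by the training script; the replacement API it points to is straightforward (minimal sketch against PyTorch 2.x):

```python
import torch

t = torch.arange(4, dtype=torch.float32)
storage = t.untyped_storage()   # replaces the deprecated t.storage()
print(storage.nbytes())         # 16: four float32 elements
```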
+ywang29-vrdb-test1-worker-0:1897687:1897687 [7] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1897687:1897687 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897687:1897687 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897686:1897686 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1897686:1897686 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897686:1897686 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897687:1897687 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1897687:1897687 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1897687:1897687 [7] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1897686:1897686 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1897686:1897686 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1897686:1897686 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:1897685:1897685 [5] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1897685:1897685 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897685:1897685 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897685:1897685 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1897685:1897685 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1897685:1897685 [5] NCCL INFO NET/Plugin: Using internal network plugin.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:1897684:1897684 [4] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1897684:1897684 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897684:1897684 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897684:1897684 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1897684:1897684 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1897684:1897684 [4] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO NET/IB : No device found.
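The Flash Attention 2.0 warning appears once per rank because each process builds the model on CPU before it reaches its GPU. Outside a launcher that handles placement for you, the pattern the message asks for is simply the following (hedged sketch; the exact loading path TinyLLaVA uses is not visible in this log):

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,              # flash-attn kernels need fp16/bf16
    attn_implementation="flash_attention_2",
)
model = model.to("cuda")  # move after CPU init, exactly as the warning suggests
```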
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO Using network Socket
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
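`Special tokens have been added in the vocabulary...` is the standard transformers reminder that any token added on top of the base tokenizer gets a freshly initialized embedding row. A minimal sketch of the situation it warns about (the pad token below is hypothetical; the log does not say which tokens were added):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", use_fast=False)
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B")

added = tok.add_special_tokens({"pad_token": "<pad>"})  # hypothetical addition
if added:
    # Without this resize (and later fine-tuning of the new rows),
    # the added ids would map to random embeddings.
    model.resize_token_embeddings(len(tok))
```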
+ywang29-vrdb-test1-worker-0:1897683:1897683 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1897683:1897683 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897683:1897683 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897683:1897683 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1897683:1897683 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1897683:1897683 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:1897681:1897681 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1897681:1897681 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897681:1897681 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897681:1897681 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1897681:1897681 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1897681:1897681 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO NET/IB : No device found.
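These NCCL lines are environment probing: `NCCL_SOCKET_IFNAME` pins traffic to the `eth` interface, no `libnccl-net.so` plugin or InfiniBand device is found, so NCCL falls back to plain TCP sockets over `eth0`. A sketch of how this setup is typically reproduced (assumes a launcher such as deepspeed or torchrun has already exported `RANK`, `WORLD_SIZE`, and `MASTER_ADDR`):

```python
import os
import torch.distributed as dist

os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")  # the value seen in the log
os.environ.setdefault("NCCL_DEBUG", "INFO")         # emits INFO lines like the ones above

# env:// rendezvous; the rank/world-size variables come from the launcher
dist.init_process_group(backend="nccl")
```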
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO ncclCommInitRank comm 0x55dd4d601be0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x3642989ca64de82d - Init START
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO ncclCommInitRank comm 0x55c5d4711010 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x3642989ca64de82d - Init START
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO ncclCommInitRank comm 0x55937d5760c0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x3642989ca64de82d - Init START
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO ncclCommInitRank comm 0x564dbc999040 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x3642989ca64de82d - Init START
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO ncclCommInitRank comm 0x5598a0a37800 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x3642989ca64de82d - Init START
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO ncclCommInitRank comm 0x5595f7f58bd0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x3642989ca64de82d - Init START
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO ncclCommInitRank comm 0x55a2bd955ac0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x3642989ca64de82d - Init START
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO ncclCommInitRank comm 0x55a12d9d04a0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x3642989ca64de82d - Init START
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO NVLS multicast support is not available on dev 7
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO NVLS multicast support is not available on dev 4
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO NVLS multicast support is not available on dev 6
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO NVLS multicast support is not available on dev 1
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO NVLS multicast support is not available on dev 2
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO NVLS multicast support is not available on dev 5
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO NVLS multicast support is not available on dev 3
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO NVLS multicast support is not available on dev 0
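`ncclCommInitRank` shows one communicator spanning 8 ranks on a single node. The `via P2P/CUMEM` transport chosen in the lines that follow requires each GPU to address its neighbor's memory directly; that precondition can be checked by hand with the public PyTorch API (illustrative sketch):

```python
import torch

n = torch.cuda.device_count()
for src in range(n):
    peers = [dst for dst in range(n)
             if dst != src and torch.cuda.can_device_access_peer(src, dst)]
    print(f"GPU {src} can reach peers: {peers}")
```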
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO comm 0x5595f7f58bd0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO comm 0x55a12d9d04a0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO comm 0x55a2bd955ac0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO comm 0x55937d5760c0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO comm 0x55c5d4711010 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO comm 0x564dbc999040 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO comm 0x5598a0a37800 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO comm 0x55dd4d601be0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO P2P Chunksize set to 524288
+[... ~190 NCCL INFO lines condensed: each rank opens send channels 00/0 through 23/0 to its ring successor (0[0] -> 1[1], 1[1] -> 2[2], ..., 6[6] -> 7[7], 7[7] -> 0[0]), all via P2P/CUMEM/read ...]
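The channel and tree dumps above all encode the same single-node chain: 24 ring channels ordered 0 1 2 3 4 5 6 7, and trees in which rank k's child is k+1 and its parent is k-1 (rank 0 is the root, rank 7 a leaf, and -1 marks "none", so rank 3 prints `4/-1/-1->3->2`). The neighbor pattern can be reproduced directly:

```python
WORLD = 8  # ranks 0-7, as in the ncclCommInitRank lines above

for rank in range(WORLD):
    ring_next = (rank + 1) % WORLD                 # Channel xx/0 : rank -> ring_next
    parent = rank - 1 if rank > 0 else -1          # Trees: child/.../...->rank->parent
    child = rank + 1 if rank < WORLD - 1 else -1
    print(f"rank {rank}: ring -> {ring_next}, tree parent {parent}, tree child {child}")
```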
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO Connected all rings
+[... ~170 NCCL INFO lines condensed, interleaved with the lines above: each rank then opens channels 00/0 through 23/0 back to its ring predecessor (7[7] -> 6[6], 6[6] -> 5[5], ..., 1[1] -> 0[0]), all via P2P/CUMEM/read ...]
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32
p2p channels per peer +ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1897686:1899294 [6] NCCL INFO ncclCommInitRank comm 0x5595f7f58bd0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x3642989ca64de82d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1897684:1899296 [4] NCCL INFO ncclCommInitRank comm 0x55c5d4711010 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x3642989ca64de82d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1897685:1899295 [5] NCCL INFO ncclCommInitRank comm 0x55a12d9d04a0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x3642989ca64de82d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1897687:1899293 [7] NCCL INFO ncclCommInitRank comm 0x55dd4d601be0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x3642989ca64de82d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1897681:1899315 [1] NCCL INFO ncclCommInitRank comm 0x564dbc999040 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x3642989ca64de82d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
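The channel and tuner lines above are standard NCCL output at NCCL_DEBUG=INFO verbosity: each GPU pair's P2P channel is wired up, the external tuner plugin fails to load because libnccl-net.so is absent, and NCCL falls back to its built-in tuner before ncclCommInitRank completes on every rank; the plugin-load failure is informational only. A minimal sketch of how such output is produced in a PyTorch job (a generic setup assumed for illustration, not this repo's code; launch under torchrun with one process per GPU):

    import os
    os.environ.setdefault("NCCL_DEBUG", "INFO")  # must be set before the first NCCL call
    import torch
    import torch.distributed as dist

    dist.init_process_group(backend="nccl")
    local_rank = int(os.environ["LOCAL_RANK"])   # provided by torchrun
    torch.cuda.set_device(local_rank)
    t = torch.ones(1, device="cuda")
    dist.all_reduce(t)  # the first collective creates the communicator and emits the channel / Init COMPLETE lines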
+ywang29-vrdb-test1-worker-0:1897680:1899275 [0] NCCL INFO ncclCommInitRank comm 0x5598a0a37800 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x3642989ca64de82d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1897683:1899314 [3] NCCL INFO ncclCommInitRank comm 0x55937d5760c0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x3642989ca64de82d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1897682:1899276 [2] NCCL INFO ncclCommInitRank comm 0x55a2bd955ac0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x3642989ca64de82d - Init COMPLETE +[2025-10-10 08:06:46,135] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 
'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 
'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from yers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to 
use it for predictions and inference. + /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 08:07:45,777] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
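The `.scores` tensors flagged above as newly initialized are not part of the pretrained Qwen2 checkpoint: they are per-weight mask logits that the masktune recipe attaches to every LLM and connector projection (`--mask_model llm-connector`), while the frozen SigLIP tower keeps plain `Linear` layers, as the module dump below shows. A minimal sketch of what such a soft-forward supermask layer plausibly looks like — the class name `SupermaskLinearSparsity_SoftForward_Normal` is from this run's code, but the body here is an assumption reconstructed from the logged hyperparameters (soft mask type, temperature 0.3, score mean 3.0 at init):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SupermaskLinearSketch(nn.Linear):
    """Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal."""

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=3.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # The pretrained weight/bias stay frozen; only the mask logits
        # ("scores", one per weight entry) are trained.
        self.weight.requires_grad = False
        if self.bias is not None:
            self.bias.requires_grad = False
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        # Soft forward: a per-weight sigmoid gate in (0, 1), sharpened by
        # the temperature, scales the frozen pretrained weight.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

With scores initialized around 3.0 and temperature 0.3, sigmoid(3.0 / 0.3) ≈ 0.99995, so under this reading the run starts from an effectively dense network and the unusually large learning rate (2e-1, applied only to scores) is what lets the gates move.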
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init
language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training 
init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init 
connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-10 08:08:04,023 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-10 08:08:04,035 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
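Two of the numbers above can be re-derived as a sanity check on the run configuration (the per-module listing continues below). `--train_data_ratio 0.1` keeps ⌊0.1 × 665298⌋ = 66529 samples, every score tensor is exactly weight-shaped, and the scores alone account for the full trainable-parameter total. A hedged re-computation — not the repo's code, and the sampling call is only illustrative:

```python
import random

total = 665298
subset = random.Random(42).sample(range(total), int(total * 0.1))
assert len(subset) == 66529  # "Randomly sampled 66529 training samples"

# Score tensors are weight-shaped, so each count is in_features * out_features
# (hidden size 896; KV projection width 128; MLP width 4864):
assert 896 * 896 == 802816     # q_proj / o_proj
assert 896 * 128 == 114688     # k_proj / v_proj
assert 896 * 4864 == 4358144   # gate_proj / up_proj / down_proj

# 24 layers of scores plus the two connector score tensors:
per_layer = 2 * 802816 + 2 * 114688 + 3 * 4358144
assert 24 * per_layer + 1032192 + 802816 == 359661568  # trainable total
```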
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%|          | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5]
4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 
[4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO 
Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
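The NCCL lines above and below record the single-node topology this run settles on: 24 communication channels, each a ring over GPUs 0 → 1 → … → 7 (rank 7 closing back to 0), plus chain-shaped trees where each rank's child is rank+1 and its parent is rank-1 (-1 meaning none), with every hop using direct GPU peer-to-peer ("via P2P/CUMEM/read"). These communicators carry the all-gather/reduce-scatter traffic that ZeRO-3 issues on every step. A minimal, standalone sketch of the kind of collective they serve — generic torch.distributed usage, not the repo's launcher:

```python
import torch
import torch.distributed as dist

# Run with: torchrun --nproc_per_node=8 this_file.py
# (the deepspeed launcher sets up the same NCCL process group internally).
dist.init_process_group(backend="nccl")
rank = dist.get_rank()
torch.cuda.set_device(rank)

# One all-reduce; NCCL routes it over rings/trees like those in the log.
t = torch.ones(1, device="cuda") * rank
dist.all_reduce(t, op=dist.ReduceOp.SUM)  # ranks 0..7 sum to 28
print(f"rank {rank}: {t.item()}")
dist.destroy_process_group()
```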
+ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL 
INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL 
INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO 
Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL 
INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL 
INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO 24 coll channels, 24 collnet 
+ywang29-vrdb-test1-worker-0:1897683:1904385 [3] NCCL INFO ncclCommInitRank comm 0x7f9d9006aaf0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x2b1a1afcb396dcfb - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1897680:1904381 [0] NCCL INFO ncclCommInitRank comm 0x7f7e2c06aaf0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x2b1a1afcb396dcfb - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1897681:1904383 [1] NCCL INFO ncclCommInitRank comm 0x7f477c06afe0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x2b1a1afcb396dcfb - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1897682:1904382 [2] NCCL INFO ncclCommInitRank comm 0x7fc88c06a800 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x2b1a1afcb396dcfb - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1897686:1904388 [6] NCCL INFO ncclCommInitRank comm 0x7f422006b000 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x2b1a1afcb396dcfb - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1897687:1904384 [7] NCCL INFO ncclCommInitRank comm 0x7fec7806a990 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x2b1a1afcb396dcfb - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1897685:1904386 [5] NCCL INFO ncclCommInitRank comm 0x7fc67406a930 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x2b1a1afcb396dcfb - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1897684:1904387 [4] NCCL INFO ncclCommInitRank comm 0x7fe6a806a850 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x2b1a1afcb396dcfb - Init COMPLETE
+  0%| | 1/520 [00:14<2:03:59, 14.33s/it] {'loss': 2.0453, 'grad_norm': 0.004835102615504125, 'learning_rate': 0.4375, 'epoch': 0.0}
+  0%| | 2/520 [00:18<1:11:03, 8.23s/it] {'loss': 2.0549, 'grad_norm': 0.00524870128212418, 'learning_rate': 0.875, 'epoch': 0.0}
+  1%| | 3/520 [00:22<53:41, 6.23s/it] {'loss': 1.6861, 'grad_norm': 0.0025401117160802978, 'learning_rate': 1.3125, 'epoch': 0.01}
+  1%| | 4/520 [00:26<45:39, 5.31s/it] {'loss': 1.7636, 'grad_norm': 0.005953797254586541, 'learning_rate': 1.75, 'epoch': 0.01}
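The eight "Init COMPLETE" lines above mark the end of NCCL communicator setup for the 8-GPU world. For orientation only, here is a minimal, hypothetical snippet that exercises the same initialization path and, when launched with NCCL_DEBUG=INFO, emits channel/ring logs like those above; it is not part of the training code in this log:

```python
# Hypothetical NCCL smoke test; launch with:
#   NCCL_DEBUG=INFO torchrun --nproc_per_node=8 nccl_check.py
import torch
import torch.distributed as dist

def main():
    dist.init_process_group(backend="nccl")  # triggers ncclCommInitRank on every rank
    rank = dist.get_rank()
    torch.cuda.set_device(rank)
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)  # the first collective opens the P2P ring/tree channels
    if rank == 0:
        print(f"all_reduce across {dist.get_world_size()} ranks ->", x.item())
    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```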
+  1%| | 5/520 [00:29<40:32, 4.72s/it] {'loss': 5.7962, 'grad_norm': 0.35566795086703545, 'learning_rate': 2.1875, 'epoch': 0.01}
+  1%| | 6/520 [00:33<37:33, 4.38s/it] {'loss': 7.7006, 'grad_norm': 0.6468955536684508, 'learning_rate': 2.625, 'epoch': 0.01}
+  1%|▏| 7/520 [00:37<35:27, 4.15s/it] {'loss': 16.4241, 'grad_norm': 0.814392919437929, 'learning_rate': 3.0625, 'epoch': 0.01}
+  2%|▏| 8/520 [00:41<35:52, 4.20s/it] {'loss': 22.6545, 'grad_norm': 1.0835529557195238, 'learning_rate': 3.5, 'epoch': 0.02}
+  2%|▏| 9/520 [00:45<35:57, 4.22s/it] {'loss': 12.6418, 'grad_norm': 0.13832756218143877, 'learning_rate': 3.9375, 'epoch': 0.02}
+  2%|▏| 10/520 [00:49<35:06, 4.13s/it] {'loss': 11.3862, 'grad_norm': 0.06740807857835743, 'learning_rate': 4.375, 'epoch': 0.02}
+  2%|▏| 11/520 [00:53<34:34, 4.08s/it] {'loss': 11.0574, 'grad_norm': 0.03674915967867772, 'learning_rate': 4.8125, 'epoch': 0.02}
+  2%|▏| 12/520 [00:57<33:29, 3.96s/it] {'loss': 10.8294, 'grad_norm': 0.017355630383816458, 'learning_rate': 5.25, 'epoch': 0.02}
+[2025-10-10 08:09:10,220] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+  2%|▎| 13/520 [01:01<34:25, 4.07s/it] {'loss': 9.7826, 'grad_norm': 0.005304884861031146, 'learning_rate': 5.6875, 'epoch': 0.03}
+  3%|▎| 14/520 [01:05<33:18, 3.95s/it] {'loss': 10.0217, 'grad_norm': 0.0037608084256213537, 'learning_rate': 6.125, 'epoch': 0.03}
+  3%|▎| 15/520 [01:08<32:40, 3.88s/it] {'loss': 10.6445, 'grad_norm': 0.002064696599280114, 'learning_rate': 6.5625, 'epoch': 0.03}
+  3%|▎| 16/520 [01:12<32:02, 3.81s/it] {'loss': 10.7165, 'grad_norm': 0.0011183525470300267, 'learning_rate': 7.0, 'epoch': 0.03}
+  3%|▎| 17/520 [01:16<31:34, 3.77s/it] {'loss': 9.8862, 'grad_norm': 0.0007677927936963055, 'learning_rate': 6.9999320052603835, 'epoch': 0.03}
+  3%|▎| 18/520 [01:19<31:16, 3.74s/it] {'loss': 9.3952, 'grad_norm': 0.0006326872289101598, 'learning_rate': 6.999728023683412, 'epoch': 0.03}
+  4%|▎| 19/520 [01:23<31:14, 3.74s/it] {'loss': 10.5096, 'grad_norm': 0.0004886025996842469, 'learning_rate': 6.999388063194613, 'epoch': 0.04}
+  4%|▍| 20/520 [01:27<31:04, 3.73s/it] {'loss': 9.3672, 'grad_norm': 0.0004625214935796637, 'learning_rate': 6.998912137002857, 'epoch': 0.04}
+  4%|▍| 21/520 [01:31<31:03, 3.73s/it] {'loss': 10.4747, 'grad_norm': 0.000520786426923069, 'learning_rate': 6.998300263599846, 'epoch': 0.04}
+  4%|▍| 22/520 [01:34<30:51, 3.72s/it] {'loss': 9.6064, 'grad_norm': 0.00047909207565951866, 'learning_rate': 6.997552466759395, 'epoch': 0.04}
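The stage3.py warning above recommends either reducing memory consumption or flushing the allocator cache at the same point on all ranks. A minimal sketch of the suggested mitigation follows; the engine and dataloader names are illustrative, not taken from this repo's training script:

```python
# Sketch of the mitigation suggested by the DeepSpeed stage3.py warning above.
from deepspeed.accelerator import get_accelerator

def train_loop(model_engine, train_dataloader):
    """model_engine: a deepspeed.initialize()'d engine (names illustrative)."""
    for batch in train_dataloader:
        loss = model_engine(batch)
        model_engine.backward(loss)
        model_engine.step()
        # Flush the CUDA caching allocator at the same point on every rank,
        # so memory-pressure flushes do not happen at different times.
        get_accelerator().empty_cache()
```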
+  4%|▍| 23/520 [01:38<30:42, 3.71s/it] {'loss': 9.4291, 'grad_norm': 0.00040097732421308325, 'learning_rate': 6.996668775536502, 'epoch': 0.04}
+  5%|▍| 24/520 [01:42<30:33, 3.70s/it] {'loss': 10.2067, 'grad_norm': 0.0003722887431080761, 'learning_rate': 6.995649224266228, 'epoch': 0.05}
+  5%|▍| 25/520 [01:45<30:24, 3.69s/it] {'loss': 9.6933, 'grad_norm': 0.0002793811346190348, 'learning_rate': 6.994493852562358, 'epoch': 0.05}
+  5%|▌| 26/520 [01:49<30:23, 3.69s/it] {'loss': 9.6456, 'grad_norm': 0.00020517205421351987, 'learning_rate': 6.993202705315862, 'epoch': 0.05}
+  5%|▌| 27/520 [01:53<30:18, 3.69s/it] {'loss': 9.2586, 'grad_norm': 0.00017884880820980508, 'learning_rate': 6.991775832693151, 'epoch': 0.05}
+  5%|▌| 28/520 [01:56<30:21, 3.70s/it] {'loss': 9.242, 'grad_norm': 0.00021814617724617358, 'learning_rate': 6.99021329013413, 'epoch': 0.05}
+  6%|▌| 29/520 [02:00<30:17, 3.70s/it] {'loss': 9.0944, 'grad_norm': 0.00018611784912967295, 'learning_rate': 6.988515138350043, 'epoch': 0.06}
+  6%|▌| 30/520 [02:04<30:13, 3.70s/it] {'loss': 10.2086, 'grad_norm': 0.00014903470780078468, 'learning_rate': 6.9866814433211095, 'epoch': 0.06}
+  6%|▌| 31/520 [02:08<30:10, 3.70s/it] {'loss': 8.8786, 'grad_norm': 0.00020083606630631024, 'learning_rate': 6.984712276293968, 'epoch': 0.06}
+  6%|▌| 32/520 [02:11<30:02, 3.69s/it] {'loss': 11.1829, 'grad_norm': 0.00018214569351281045, 'learning_rate': 6.982607713778905, 'epoch': 0.06}
+  6%|▋| 33/520 [02:15<29:57, 3.69s/it] {'loss': 9.4895, 'grad_norm': 0.0001814214823795566, 'learning_rate': 6.980367837546879, 'epoch': 0.06}
+  7%|▋| 34/520 [02:19<29:56, 3.70s/it] {'loss': 9.2031, 'grad_norm': 0.00018330534710684222, 'learning_rate': 6.9779927346263495, 'epoch': 0.07}
+  7%|▋| 35/520 [02:22<29:46, 3.68s/it] {'loss': 9.3554, 'grad_norm': 0.00014944171259843476, 'learning_rate': 6.975482497299888, 'epoch': 0.07}
+  7%|▋| 36/520 [02:26<29:47, 3.69s/it] {'loss': 9.8941, 'grad_norm': 0.00014660542173684443, 'learning_rate': 6.972837223100603, 'epoch': 0.07}
+  7%|▋| 37/520 [02:30<29:48, 3.70s/it] {'loss': 10.137, 'grad_norm': 0.0001642371513066651, 'learning_rate': 6.9700570148083365, 'epoch': 0.07}
+  7%|▋| 38/520 [02:33<29:39, 3.69s/it] {'loss': 9.8783, 'grad_norm': 0.00016156667188370064, 'learning_rate': 6.9671419804456844, 'epoch': 0.07}
+  8%|▊| 39/520 [02:37<29:51, 3.72s/it] {'loss': 9.2524, 'grad_norm': 0.0001797421847704053, 'learning_rate': 6.964092233273791, 'epoch': 0.07}
+  8%|▊| 40/520 [02:41<29:58, 3.75s/it] {'loss': 9.3602, 'grad_norm': 0.00014551996944465147, 'learning_rate': 6.960907891787949, 'epoch': 0.08}
+  8%|▊| 41/520 [02:45<30:03, 3.77s/it] {'loss': 9.4554, 'grad_norm': 0.00012497619369700405, 'learning_rate': 6.957589079713001, 'epoch': 0.08}
+  8%|▊| 42/520 [02:49<29:51, 3.75s/it] {'loss': 9.9303, 'grad_norm': 0.00011022958808387155, 'learning_rate': 6.954135925998524, 'epoch': 0.08}
+  8%|▊| 43/520 [02:52<29:38, 3.73s/it] {'loss': 10.4099, 'grad_norm': 0.00011896953512512239, 'learning_rate': 6.950548564813825, 'epoch': 0.08}
+  8%|▊| 44/520 [02:56<29:33, 3.73s/it] {'loss': 10.4274, 'grad_norm': 0.00010126934715288826, 'learning_rate': 6.946827135542728, 'epoch': 0.08}
+  9%|▊| 45/520 [03:00<29:27, 3.72s/it] {'loss': 9.1393, 'grad_norm': 0.00011482786364024721, 'learning_rate': 6.942971782778154, 'epoch': 0.09}
+  9%|▉| 46/520 [03:03<29:19, 3.71s/it] {'loss': 10.9857, 'grad_norm': 0.00010582213620529123, 'learning_rate': 6.93898265631651, 'epoch': 0.09}
+  9%|▉| 47/520 [03:07<29:31, 3.74s/it] {'loss': 9.5579, 'grad_norm': 9.209924504703499e-05, 'learning_rate': 6.934859911151857, 'epoch': 0.09}
+  9%|▉| 48/520 [03:11<29:35, 3.76s/it] {'loss': 9.3009, 'grad_norm': 8.739367004190879e-05, 'learning_rate': 6.930603707469904, 'epoch': 0.09}
+  9%|▉| 49/520 [03:15<29:16, 3.73s/it] {'loss': 9.3089, 'grad_norm': 8.519517256898897e-05, 'learning_rate': 6.92621421064177, 'epoch': 0.09}
+ 10%|▉| 50/520 [03:18<29:31, 3.77s/it] {'loss': 9.2645, 'grad_norm': 8.593630544523566e-05, 'learning_rate': 6.9216915912175665, 'epoch': 0.1}
+ 10%|▉| 51/520 [03:22<29:45, 3.81s/it] {'loss': 8.9911, 'grad_norm': 8.054549838064862e-05, 'learning_rate': 6.917036024919767, 'epoch': 0.1}
+ 10%|█| 52/520 [03:26<30:02, 3.85s/it] {'loss': 9.7454, 'grad_norm': 9.551648638800395e-05, 'learning_rate': 6.912247692636383, 'epoch': 0.1}
+ 10%|█| 53/520 [03:30<30:13, 3.88s/it] {'loss': 9.6053, 'grad_norm': 9.50136512428481e-05, 'learning_rate': 6.907326780413931, 'epoch': 0.1}
+ 10%|█| 54/520 [03:34<30:16, 3.90s/it] {'loss': 8.9375, 'grad_norm': 9.727579527885848e-05, 'learning_rate': 6.90227347945021, 'epoch': 0.1}
+ 11%|█| 55/520 [03:38<30:04, 3.88s/it] {'loss': 9.3155, 'grad_norm': 7.811203336699996e-05, 'learning_rate': 6.897087986086868, 'epoch': 0.11}
+ 11%|█| 56/520 [03:42<29:52, 3.86s/it] {'loss': 9.6305, 'grad_norm': 8.481839147780753e-05, 'learning_rate': 6.891770501801773, 'epoch': 0.11}
+ 11%|█| 57/520 [03:46<29:24, 3.81s/it] {'loss': 9.1515, 'grad_norm': 7.772166154726557e-05, 'learning_rate': 6.886321233201187, 'epoch': 0.11}
+ 11%|█| 58/520 [03:49<29:06, 3.78s/it] {'loss': 9.4138, 'grad_norm': 9.543158222007719e-05, 'learning_rate': 6.880740392011739, 'epoch': 0.11}
+ 11%|█▏| 59/520 [03:53<28:47, 3.75s/it] {'loss': 9.9806, 'grad_norm': 0.00012707108381865967, 'learning_rate': 6.875028195072197, 'epoch': 0.11}
+ 12%|█▏| 60/520 [03:57<28:37, 3.73s/it] {'loss': 9.6498, 'grad_norm': 7.083778161745506e-05, 'learning_rate': 6.8691848643250415, 'epoch': 0.12}
+ 12%|█▏| 61/520 [04:00<28:23, 3.71s/it] {'loss': 10.5587, 'grad_norm': 8.415017235364224e-05, 'learning_rate': 6.863210626807849, 'epoch': 0.12}
+ 12%|█▏| 62/520 [04:04<28:15, 3.70s/it] {'loss': 9.6387, 'grad_norm': 6.108094341345591e-05, 'learning_rate': 6.857105714644457, 'epoch': 0.12}
+ 12%|█▏| 63/520 [04:08<28:14, 3.71s/it] {'loss': 9.2413, 'grad_norm': 8.257948566187161e-05, 'learning_rate': 6.850870365035963, 'epoch': 0.12}
+ 12%|█▏| 64/520 [04:11<28:06, 3.70s/it] {'loss': 9.2167, 'grad_norm': 7.327875939067957e-05, 'learning_rate': 6.844504820251493, 'epoch': 0.12}
+ 12%|█▎| 65/520 [04:15<28:02, 3.70s/it] {'loss': 9.6805, 'grad_norm': 0.00011522091129078533, 'learning_rate': 6.838009327618794, 'epoch': 0.12}
+ 13%|█▎| 66/520 [04:19<27:49, 3.68s/it] {'loss': 9.6642, 'grad_norm': 0.00011525569470640713, 'learning_rate': 6.831384139514629, 'epoch': 0.13}
+ 13%|█▎| 67/520 [04:22<27:43, 3.67s/it] {'loss': 9.175, 'grad_norm': 9.322575042364997e-05, 'learning_rate': 6.82462951335496, 'epoch': 0.13}
+ 13%|█▎| 68/520 [04:26<27:41, 3.68s/it] {'loss': 8.867, 'grad_norm': 6.185809899446329e-05, 'learning_rate': 6.817745711584961, 'epoch': 0.13}
+ 13%|█▎| 69/520 [04:30<27:36, 3.67s/it] {'loss': 8.9788, 'grad_norm': 7.155825872532827e-05, 'learning_rate': 6.8107330016688055, 'epoch': 0.13}
+ 13%|█▎| 70/520 [04:33<27:31, 3.67s/it] {'loss': 9.2111, 'grad_norm': 5.83060083090526e-05, 'learning_rate': 6.803591656079287, 'epoch': 0.13}
+ 14%|█▎| 71/520 [04:37<27:27, 3.67s/it] {'loss': 8.9185, 'grad_norm': 6.74188084848426e-05, 'learning_rate': 6.796321952287222, 'epoch': 0.14}
+ 14%|█▍| 72/520 [04:41<27:18, 3.66s/it] {'loss': 9.3898, 'grad_norm': 6.285248138351992e-05, 'learning_rate': 6.788924172750679, 'epoch': 0.14}
+ 14%|█▍| 73/520 [04:44<27:20, 3.67s/it] {'loss': 8.8877, 'grad_norm': 6.630567962908624e-05, 'learning_rate': 6.781398604903997, 'epoch': 0.14}
+ 14%|█▍| 74/520 [04:48<27:28, 3.70s/it] {'loss': 9.5257, 'grad_norm': 5.4389773529815997e-05, 'learning_rate': 6.773745541146619, 'epoch': 0.14}
+ 14%|█▍| 75/520 [04:52<27:36, 3.72s/it] {'loss': 8.5657, 'grad_norm': 6.630876093509728e-05, 'learning_rate': 6.765965278831732, 'epoch': 0.14}
+ 15%|█▍| 76/520 [04:56<27:31, 3.72s/it] {'loss': 10.458, 'grad_norm': 7.324177939496126e-05, 'learning_rate': 6.758058120254715, 'epoch': 0.15}
+ 15%|█▍| 77/520 [04:59<27:26, 3.72s/it] {'loss': 9.1257, 'grad_norm': 6.810289054990334e-05, 'learning_rate': 6.750024372641388, 'epoch': 0.15}
+ 15%|█▌| 78/520 [05:03<27:12, 3.69s/it] {'loss': 8.8425, 'grad_norm': 7.843511693872099e-05, 'learning_rate': 6.7418643481360805, 'epoch': 0.15}
+ 15%|█▌| 79/520 [05:07<27:06, 3.69s/it] {'loss': 9.2094, 'grad_norm': 5.09109501549518e-05, 'learning_rate': 6.733578363789504, 'epoch': 0.15}
+ 15%|█▌| 80/520 [05:10<27:12, 3.71s/it] {'loss': 11.0026, 'grad_norm': 7.111752127078103e-05, 'learning_rate': 6.725166741546427, 'epoch': 0.15}
+ 16%|█▌| 81/520 [05:14<27:23, 3.74s/it] {'loss': 10.0816, 'grad_norm': 6.250193254637626e-05, 'learning_rate': 6.716629808233172, 'epoch': 0.16}
+ 16%|█▌| 82/520 [05:18<27:39, 3.79s/it] {'loss': 9.2745, 'grad_norm': 5.246027413312568e-05, 'learning_rate': 6.7079678955449165, 'epoch': 0.16}
+ 16%|█▌| 83/520 [05:22<27:41, 3.80s/it] {'loss': 9.5656, 'grad_norm': 4.927178515122492e-05, 'learning_rate': 6.699181340032801, 'epoch': 0.16}
+ 16%|█▌| 84/520 [05:26<27:44, 3.82s/it] {'loss': 9.4791, 'grad_norm': 6.457049201645931e-05, 'learning_rate': 6.690270483090856, 'epoch': 0.16}
+ 16%|█▋| 85/520 [05:30<27:43, 3.83s/it] {'loss': 9.3407, 'grad_norm': 5.674939091344105e-05, 'learning_rate': 6.681235670942739, 'epoch': 0.16}
+ 17%|█▋| 86/520 [05:33<27:40, 3.83s/it] {'loss': 9.9327, 'grad_norm': 6.06062261041111e-05, 'learning_rate': 6.672077254628276, 'epoch': 0.17}
+ 17%|█▋| 87/520 [05:37<27:38, 3.83s/it] {'loss': 10.5242, 'grad_norm': 7.835598452553917e-05, 'learning_rate': 6.662795589989829, 'epoch': 0.17}
+ 17%|█▋| 88/520 [05:41<27:39, 3.84s/it] {'loss': 10.9868, 'grad_norm': 0.00013511821502175904, 'learning_rate': 6.653391037658467, 'epoch': 0.17}
+ 17%|█▋| 89/520 [05:45<27:40, 3.85s/it] {'loss': 9.4252, 'grad_norm': 6.148597766270794e-05, 'learning_rate': 6.643863963039955, 'epoch': 0.17}
+ 17%|█▋| 90/520 [05:49<27:40, 3.86s/it] {'loss': 9.1163, 'grad_norm': 7.934613283267431e-05, 'learning_rate': 6.634214736300553, 'epoch': 0.17}
+ 18%|█▊| 91/520 [05:53<27:34, 3.86s/it] {'loss': 9.4872, 'grad_norm': 0.00011335845873467092, 'learning_rate': 6.62444373235264, 'epoch': 0.17}
+ 18%|█▊| 92/520 [05:57<27:32, 3.86s/it] {'loss': 9.124, 'grad_norm': 6.478791964734408e-05, 'learning_rate': 6.614551330840141, 'epoch': 0.18}
+ 18%|█▊| 93/520 [06:01<27:26, 3.86s/it] {'loss': 9.2699, 'grad_norm': 6.564566838264831e-05, 'learning_rate': 6.604537916123776, 'epoch': 0.18}
+ 18%|█▊| 94/520 [06:04<27:18, 3.85s/it] {'loss': 9.7334, 'grad_norm': 6.030287059662626e-05, 'learning_rate': 6.594403877266134, 'epoch': 0.18}
+ 18%|█▊| 95/520 [06:08<27:13, 3.84s/it] {'loss': 9.4054, 'grad_norm': 4.80008377760042e-05, 'learning_rate': 6.584149608016548, 'epoch': 0.18}
+ 18%|█▊| 96/520 [06:12<27:12, 3.85s/it] {'loss': 8.8674, 'grad_norm': 6.479947153940536e-05, 'learning_rate': 6.5737755067957995, 'epoch': 0.18}
+ 19%|█▊| 97/520 [06:16<27:16, 3.87s/it] {'loss': 9.4184, 'grad_norm': 5.806209923902648e-05, 'learning_rate': 6.5632819766806385, 'epoch': 0.19}
+ 19%|█▉| 98/520 [06:20<26:47, 3.81s/it] {'loss': 8.7596, 'grad_norm': 7.145874211081259e-05, 'learning_rate': 6.552669425388119, 'epoch': 0.19}
+ 19%|█▉| 99/520 [06:23<26:31, 3.78s/it] {'loss': 9.5677, 'grad_norm': 5.1287713685562625e-05, 'learning_rate': 6.541938265259763, 'epoch': 0.19}
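The per-step dicts above are the HF Trainer's logging_steps=1 output, and they are easy to scrape for post-hoc plots of the loss or learning-rate curves. A small, hypothetical helper (the log path is illustrative):

```python
# Hypothetical helper to pull the per-step metrics dicts out of a log like this one.
import ast
import re

METRICS = re.compile(r"\{'loss':[^}]*\}")

def parse_metrics(path="train.log"):  # path is illustrative
    with open(path, encoding="utf-8", errors="replace") as f:
        # Each dict is a valid Python literal, so literal_eval parses it safely.
        return [ast.literal_eval(m.group(0)) for m in METRICS.finditer(f.read())]

steps = parse_metrics()
losses = [s["loss"] for s in steps]
print(len(losses), "logged steps; first loss:", losses[0])
```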
+ 19%|█▉| 100/520 [06:27<26:36, 3.80s/it] {'loss': 10.3254, 'grad_norm': 9.423817955964745e-05, 'learning_rate': 6.531088913245536, 'epoch': 0.19}
+ 19%|█▉| 101/520 [06:31<26:40, 3.82s/it] {'loss': 9.2095, 'grad_norm': 5.0446491878642564e-05, 'learning_rate': 6.520121790887646, 'epoch': 0.19}
+ 20%|█▉| 102/520 [06:35<26:37, 3.82s/it] {'loss': 9.2772, 'grad_norm': 8.596133525234279e-05, 'learning_rate': 6.509037324304166, 'epoch': 0.2}
+ 20%|█▉| 103/520 [06:39<26:20, 3.79s/it] {'loss': 8.5534, 'grad_norm': 7.409140195314128e-05, 'learning_rate': 6.497835944172481, 'epoch': 0.2}
+ 20%|██| 104/520 [06:42<26:00, 3.75s/it] {'loss': 9.5395, 'grad_norm': 7.786548199003503e-05, 'learning_rate': 6.486518085712545, 'epoch': 0.2}
+ 20%|██| 105/520 [06:46<25:52, 3.74s/it] {'loss': 9.2227, 'grad_norm': 7.193732323476376e-05, 'learning_rate': 6.475084188669982, 'epoch': 0.2}
+ 20%|██| 106/520 [06:50<25:52, 3.75s/it] {'loss': 10.1493, 'grad_norm': 8.502732943700305e-05, 'learning_rate': 6.463534697298995, 'epoch': 0.2}
+ 21%|██| 107/520 [06:53<25:40, 3.73s/it] {'loss': 10.1853, 'grad_norm': 7.567662198398774e-05, 'learning_rate': 6.4518700603451, 'epoch': 0.21}
+ 21%|██| 108/520 [06:57<25:43, 3.75s/it] {'loss': 9.2599, 'grad_norm': 0.00010368175635437064, 'learning_rate': 6.4400907310277, 'epoch': 0.21}
+ 21%|██| 109/520 [07:01<26:07, 3.82s/it] {'loss': 10.3738, 'grad_norm': 0.00013364025619497913, 'learning_rate': 6.42819716702247, 'epoch': 0.21}
+ 21%|██| 110/520 [07:05<26:17, 3.85s/it] {'loss': 9.851, 'grad_norm': 0.00015628367763847564, 'learning_rate': 6.416189830443571, 'epoch': 0.21}
+ 21%|██▏| 111/520 [07:09<26:24, 3.87s/it] {'loss': 9.8466, 'grad_norm': 0.0002888117084591691, 'learning_rate': 6.404069187825706, 'epoch': 0.21}
+ 22%|██▏| 112/520 [07:13<26:21, 3.88s/it] {'loss': 9.7658, 'grad_norm': 0.0002683913571059006, 'learning_rate': 6.391835710105982, 'epoch': 0.22}
+ 22%|██▏| 113/520 [07:17<26:25, 3.90s/it] {'loss': 8.878, 'grad_norm': 0.00016065919289065628, 'learning_rate': 6.379489872605617, 'epoch': 0.22}
+ 22%|██▏| 114/520 [07:21<26:26, 3.91s/it] {'loss': 9.8626, 'grad_norm': 0.00023181139284836748, 'learning_rate': 6.367032155011472, 'epoch': 0.22}
+ 22%|██▏| 115/520 [07:25<26:22, 3.91s/it] {'loss': 10.2684, 'grad_norm': 0.00030467915656872324, 'learning_rate': 6.354463041357411, 'epoch': 0.22}
+ 22%|██▏| 116/520 [07:29<26:25, 3.93s/it] {'loss': 9.6758, 'grad_norm': 0.00028442883622026803, 'learning_rate': 6.341783020005499, 'epoch': 0.22}
+ 22%|██▎| 117/520 [07:33<26:12, 3.90s/it] {'loss': 9.6686, 'grad_norm': 0.00014985623050961516, 'learning_rate': 6.328992583627018, 'epoch': 0.23}
+ 23%|██▎| 118/520 [07:36<25:48, 3.85s/it] {'loss': 9.0534, 'grad_norm': 7.675321777848553e-05, 'learning_rate': 6.316092229183339, 'epoch': 0.23}
+ 23%|██▎| 119/520 [07:40<25:31, 3.82s/it] {'loss': 8.7529, 'grad_norm': 5.5661904938614686e-05, 'learning_rate': 6.303082457906598, 'epoch': 0.23}
+ 23%|██▎| 120/520 [07:44<25:10, 3.78s/it] {'loss': 9.1962, 'grad_norm': 4.1685588956875746e-05, 'learning_rate': 6.289963775280229, 'epoch': 0.23}
+ 23%|██▎| 121/520 [07:47<25:01, 3.76s/it] {'loss': 8.8993, 'grad_norm': 5.086045288513006e-05, 'learning_rate': 6.276736691019323, 'epoch': 0.23}
+ 23%|██▎| 122/520 [07:51<24:50, 3.74s/it] {'loss': 8.8336, 'grad_norm': 4.76891640101111e-05, 'learning_rate': 6.263401719050824, 'epoch': 0.23}
+ 24%|██▎| 123/520 [07:55<24:45, 3.74s/it] {'loss': 10.3109, 'grad_norm': 5.86958709229902e-05, 'learning_rate': 6.249959377493558, 'epoch': 0.24}
+ 24%|██▍| 124/520 [07:59<24:35, 3.73s/it] {'loss': 9.8312, 'grad_norm': 4.394103822576825e-05, 'learning_rate': 6.2364101886381045, 'epoch': 0.24}
+ 24%|██▍| 125/520 [08:02<24:31, 3.72s/it] {'loss': 9.3113, 'grad_norm': 4.287539110818808e-05, 'learning_rate': 6.222754678926502, 'epoch': 0.24}
+ 24%|██▍| 126/520 [08:07<25:54, 3.94s/it] {'loss': 9.4385, 'grad_norm': 5.704945456783255e-05, 'learning_rate': 6.208993378931797, 'epoch': 0.24}
+ 24%|██▍| 127/520 [08:10<25:26, 3.88s/it] {'loss': 9.5997, 'grad_norm': 4.672732719168869e-05, 'learning_rate': 6.19512682333742, 'epoch': 0.24}
+ 25%|██▍| 128/520 [08:14<25:03, 3.84s/it] {'loss': 9.3866, 'grad_norm': 4.742044799820088e-05, 'learning_rate': 6.181155550916422, 'epoch': 0.25}
+ 25%|██▍| 129/520 [08:18<24:51, 3.81s/it] {'loss': 8.6911, 'grad_norm': 4.362674903602945e-05, 'learning_rate': 6.1670801045105375, 'epoch': 0.25}
+ 25%|██▌| 130/520 [08:22<24:30, 3.77s/it] {'loss': 9.6892, 'grad_norm': 4.112273240102769e-05, 'learning_rate': 6.152901031009086, 'epoch': 0.25}
+ 25%|██▌| 131/520 [08:25<24:20, 3.75s/it] {'loss': 10.0408, 'grad_norm': 4.847146153631992e-05, 'learning_rate': 6.138618881327729, 'epoch': 0.25}
+ 25%|██▌| 132/520 [08:29<24:10, 3.74s/it] {'loss': 9.5767, 'grad_norm': 4.193440611577111e-05, 'learning_rate': 6.12423421038707, 'epoch': 0.25}
+ 26%|██▌| 133/520 [08:33<23:58, 3.72s/it] {'loss': 9.4458, 'grad_norm': 4.786019213471946e-05, 'learning_rate': 6.109747577091079, 'epoch': 0.26}
+ 26%|██▌| 134/520 [08:36<23:55, 3.72s/it] {'loss': 9.3638, 'grad_norm': 2.4682111517359473e-05, 'learning_rate': 6.095159544305393, 'epoch': 0.26}
+ 26%|██▌| 135/520 [08:40<23:47, 3.71s/it] {'loss': 9.8249, 'grad_norm': 3.228393480578594e-05, 'learning_rate': 6.080470678835434, 'epoch': 0.26}
+ 26%|██▌| 136/520 [08:44<23:38, 3.69s/it] {'loss': 9.1198, 'grad_norm': 3.222319545452136e-05, 'learning_rate': 6.065681551404392, 'epoch': 0.26}
+ 26%|██▋| 137/520 [08:48<23:40, 3.71s/it] {'loss': 9.4145, 'grad_norm': 3.299212697994606e-05, 'learning_rate': 6.05079273663105, 'epoch': 0.26}
+ 27%|██▋| 138/520 [08:51<23:36, 3.71s/it] {'loss': 9.0978, 'grad_norm': 3.347497950069305e-05, 'learning_rate': 6.035804813007454, 'epoch': 0.27}
+ 27%|██▋| 139/520 [08:55<23:33, 3.71s/it] {'loss': 9.5402, 'grad_norm': 4.671989542896267e-05, 'learning_rate': 6.020718362876443, 'epoch': 0.27}
+ 27%|██▋| 140/520 [08:59<23:28, 3.71s/it] {'loss': 10.2022, 'grad_norm': 5.1737139272981056e-05, 'learning_rate': 6.005533972409014, 'epoch': 0.27}
+ 27%|██▋| 141/520 [09:02<23:22, 3.70s/it] {'loss': 9.4995, 'grad_norm': 3.3763222221888326e-05, 'learning_rate': 5.990252231581556, 'epoch': 0.27}
+ 27%|██▋| 142/520 [09:06<23:20, 3.70s/it] {'loss': 10.1758, 'grad_norm': 3.9859832269907416e-05, 'learning_rate': 5.974873734152916, 'epoch': 0.27}
+ 28%|██▊| 143/520 [09:10<23:13, 3.70s/it] {'loss': 9.4875, 'grad_norm': 2.8536147280714194e-05, 'learning_rate': 5.959399077641342, 'epoch': 0.28}
+ 28%|██▊| 144/520 [09:14<23:21, 3.73s/it] {'loss': 8.7999, 'grad_norm': 4.701757417121618e-05, 'learning_rate': 5.943828863301254, 'epoch': 0.28}
+ 28%|██▊| 145/520 [09:17<23:41, 3.79s/it] {'loss': 9.0274, 'grad_norm': 3.3699654562785854e-05, 'learning_rate': 5.928163696099896, 'epoch': 0.28}
+ 28%|██▊| 146/520 [09:21<23:41, 3.80s/it] {'loss': 10.3762, 'grad_norm': 4.3077394660152263e-05, 'learning_rate': 5.9124041846938145, 'epoch': 0.28}
+ 28%|██▊| 147/520 [09:25<23:28, 3.78s/it] {'loss': 8.87, 'grad_norm': 3.85640586800382e-05, 'learning_rate': 5.896550941405227, 'epoch': 0.28}
+ 28%|██▊| 148/520 [09:29<23:19, 3.76s/it] {'loss': 9.1246, 'grad_norm': 2.841130161339853e-05, 'learning_rate': 5.880604582198218, 'epoch': 0.28}
+ 29%|██▊| 149/520 [09:32<23:05, 3.74s/it] {'loss': 9.1946, 'grad_norm': 5.953171477926271e-05, 'learning_rate': 5.864565726654812, 'epoch': 0.29}
+ 29%|██▉| 150/520 [09:36<23:05, 3.74s/it] {'loss': 9.5467, 'grad_norm': 5.381183924374426e-05, 'learning_rate': 5.848434997950895, 'epoch': 0.29}
+ 29%|██▉| 151/520 [09:40<23:22, 3.80s/it] {'loss': 8.9851, 'grad_norm': 4.3319237182844304e-05, 'learning_rate': 5.832213022832014, 'epoch': 0.29}
+ 29%|██▉| 152/520 [09:44<23:30, 3.83s/it] {'loss': 9.1455, 'grad_norm': 3.9753803521265906e-05, 'learning_rate': 5.815900431589008, 'epoch': 0.29}
+ 29%|██▉| 153/520 [09:48<23:38, 3.86s/it] {'loss': 9.0015, 'grad_norm': 3.4293474904511656e-05, 'learning_rate': 5.799497858033532, 'epoch': 0.29}
+ 30%|██▉| 154/520 [09:52<23:39, 3.88s/it] {'loss': 9.4074, 'grad_norm': 3.4760056492456417e-05, 'learning_rate': 5.783005939473425, 'epoch': 0.3}
+ 30%|██▉| 155/520 [09:56<23:39, 3.89s/it] {'loss': 9.503, 'grad_norm': 2.468559236929684e-05, 'learning_rate': 5.766425316687947, 'epoch': 0.3}
+ 30%|███|
156/520 [10:00<23:39, 3.90s/it] {'loss': 9.597, 'grad_norm': 2.5226467189897095e-05, 'learning_rate': 5.749756633902887, 'epoch': 0.3} + 30%|███ | 156/520 [10:00<23:39, 3.90s/it] 30%|███ | 157/520 [10:04<23:36, 3.90s/it] {'loss': 10.4382, 'grad_norm': 3.36613393106302e-05, 'learning_rate': 5.7330005387655305, 'epoch': 0.3} + 30%|███ | 157/520 [10:04<23:36, 3.90s/it] 30%|███ | 158/520 [10:07<23:33, 3.91s/it] {'loss': 9.1425, 'grad_norm': 2.732368803463646e-05, 'learning_rate': 5.71615768231949, 'epoch': 0.3} + 30%|███ | 158/520 [10:08<23:33, 3.91s/it] 31%|███ | 159/520 [10:11<23:31, 3.91s/it] {'loss': 9.0763, 'grad_norm': 2.9070724781159264e-05, 'learning_rate': 5.699228718979415, 'epoch': 0.31} + 31%|███ | 159/520 [10:11<23:31, 3.91s/it] 31%|███ | 160/520 [10:15<23:32, 3.92s/it] {'loss': 9.1227, 'grad_norm': 1.924293464368943e-05, 'learning_rate': 5.682214306505568, 'epoch': 0.31} + 31%|███ | 160/520 [10:15<23:32, 3.92s/it] 31%|███ | 161/520 [10:19<23:33, 3.94s/it] {'loss': 9.2728, 'grad_norm': 2.7568329328422325e-05, 'learning_rate': 5.665115105978258, 'epoch': 0.31} + 31%|███ | 161/520 [10:19<23:33, 3.94s/it] 31%|███ | 162/520 [10:23<23:32, 3.95s/it] {'loss': 10.287, 'grad_norm': 2.5979721758019727e-05, 'learning_rate': 5.647931781772166, 'epoch': 0.31} + 31%|███ | 162/520 [10:23<23:32, 3.95s/it] 31%|███▏ | 163/520 [10:27<23:26, 3.94s/it] {'loss': 8.9361, 'grad_norm': 3.129817582873227e-05, 'learning_rate': 5.630665001530522, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:27<23:26, 3.94s/it] 32%|███▏ | 164/520 [10:31<23:28, 3.96s/it] {'loss': 8.7654, 'grad_norm': 3.3638265372414024e-05, 'learning_rate': 5.613315436139171, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:31<23:28, 3.96s/it] 32%|███▏ | 165/520 [10:35<23:19, 3.94s/it] {'loss': 9.1244, 'grad_norm': 2.275371232709504e-05, 'learning_rate': 5.595883759700501, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:35<23:19, 3.94s/it] 32%|███▏ | 166/520 [10:39<23:08, 3.92s/it] {'loss': 9.0091, 'grad_norm': 2.6839532922610137e-05, 'learning_rate': 5.578370649507255, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:39<23:08, 3.92s/it] 32%|███▏ | 167/520 [10:43<23:03, 3.92s/it] {'loss': 9.5184, 'grad_norm': 2.124942086355659e-05, 'learning_rate': 5.560776786016216, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:43<23:03, 3.92s/it] 32%|███▏ | 168/520 [10:47<22:49, 3.89s/it] {'loss': 9.0391, 'grad_norm': 2.2604000693802738e-05, 'learning_rate': 5.5431028528217645, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:47<22:49, 3.89s/it] 32%|███▎ | 169/520 [10:50<22:24, 3.83s/it] {'loss': 9.3019, 'grad_norm': 2.0647260792803352e-05, 'learning_rate': 5.525349536629321, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:50<22:24, 3.83s/it] 33%|███▎ | 170/520 [10:54<22:05, 3.79s/it] {'loss': 9.8345, 'grad_norm': 3.338840043765166e-05, 'learning_rate': 5.507517527228661, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:54<22:05, 3.79s/it] 33%|███▎ | 171/520 [10:58<21:51, 3.76s/it] {'loss': 8.8361, 'grad_norm': 3.3087343219080927e-05, 'learning_rate': 5.489607517467124, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:58<21:51, 3.76s/it] 33%|███▎ | 172/520 [11:02<21:40, 3.74s/it] {'loss': 9.2556, 'grad_norm': 2.1781605936483488e-05, 'learning_rate': 5.471620203222677, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:02<21:40, 3.74s/it] 33%|███▎ | 173/520 [11:05<21:31, 3.72s/it] {'loss': 8.8315, 'grad_norm': 2.4045236442895998e-05, 'learning_rate': 5.453556283376894, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:05<21:31, 3.72s/it] 33%|███▎ | 174/520 [11:09<21:33, 3.74s/it] {'loss': 9.4402, 'grad_norm': 2.5802265661886916e-05, 'learning_rate': 
5.435416459787787, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:09<21:33, 3.74s/it] 34%|███▎ | 175/520 [11:13<21:25, 3.73s/it] {'loss': 8.9222, 'grad_norm': 2.2264470950179674e-05, 'learning_rate': 5.41720143726255, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:13<21:25, 3.73s/it] 34%|███▍ | 176/520 [11:16<21:20, 3.72s/it] {'loss': 10.4193, 'grad_norm': 3.004029388445704e-05, 'learning_rate': 5.398911923530158, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:16<21:20, 3.72s/it] 34%|███▍ | 177/520 [11:20<21:13, 3.71s/it] {'loss': 9.8762, 'grad_norm': 3.438524149337614e-05, 'learning_rate': 5.380548629213884, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:20<21:13, 3.71s/it] 34%|███▍ | 178/520 [11:24<21:09, 3.71s/it] {'loss': 9.3269, 'grad_norm': 2.3132070320313e-05, 'learning_rate': 5.362112267803678, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:24<21:09, 3.71s/it] 34%|███▍ | 179/520 [11:27<21:02, 3.70s/it] {'loss': 9.2024, 'grad_norm': 2.4655044616631133e-05, 'learning_rate': 5.3436035556284525, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:27<21:02, 3.70s/it] 35%|███▍ | 180/520 [11:31<21:10, 3.74s/it] {'loss': 9.3691, 'grad_norm': 1.819856071557589e-05, 'learning_rate': 5.325023211828243, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:31<21:10, 3.74s/it] 35%|███▍ | 181/520 [11:35<21:27, 3.80s/it] {'loss': 8.9613, 'grad_norm': 2.966879873949349e-05, 'learning_rate': 5.306371958326273, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:35<21:27, 3.80s/it] 35%|███▌ | 182/520 [11:39<21:26, 3.81s/it] {'loss': 9.344, 'grad_norm': 2.45127130536138e-05, 'learning_rate': 5.2876505198009, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:39<21:26, 3.81s/it] 35%|███▌ | 183/520 [11:43<21:08, 3.76s/it] {'loss': 8.9944, 'grad_norm': 3.14753820840268e-05, 'learning_rate': 5.268859623657458, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:43<21:08, 3.76s/it] 35%|███▌ | 184/520 [11:46<20:59, 3.75s/it] {'loss': 8.7882, 'grad_norm': 3.3931099900893795e-05, 'learning_rate': 5.25, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:46<20:59, 3.75s/it] 36%|███▌ | 185/520 [11:50<20:54, 3.75s/it] {'loss': 9.8297, 'grad_norm': 2.8470918066438948e-05, 'learning_rate': 5.231072381602926, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:50<20:54, 3.75s/it] 36%|███▌ | 186/520 [11:54<20:39, 3.71s/it] {'loss': 9.0648, 'grad_norm': 2.1367728617761087e-05, 'learning_rate': 5.212077503882513, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:54<20:39, 3.71s/it] 36%|███▌ | 187/520 [11:58<20:41, 3.73s/it] {'loss': 9.466, 'grad_norm': 2.8068695711119732e-05, 'learning_rate': 5.193016104868339, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:58<20:41, 3.73s/it] 36%|███▌ | 188/520 [12:01<20:47, 3.76s/it] {'loss': 9.0321, 'grad_norm': 2.326234474013613e-05, 'learning_rate': 5.173888925174614, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:01<20:47, 3.76s/it] 36%|███▋ | 189/520 [12:05<20:54, 3.79s/it] {'loss': 9.5034, 'grad_norm': 2.3008910299814427e-05, 'learning_rate': 5.154696707971395, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:05<20:54, 3.79s/it] 37%|███▋ | 190/520 [12:09<20:57, 3.81s/it] {'loss': 9.0834, 'grad_norm': 1.8895377405612764e-05, 'learning_rate': 5.135440198955717, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:09<20:57, 3.81s/it] 37%|███▋ | 191/520 [12:13<20:55, 3.82s/it] {'loss': 9.25, 'grad_norm': 1.8946498391582037e-05, 'learning_rate': 5.116120146322619, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:13<20:55, 3.82s/it] 37%|███▋ | 192/520 [12:17<20:58, 3.84s/it] {'loss': 9.6277, 'grad_norm': 1.8553804606710238e-05, 'learning_rate': 5.096737300736071, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:17<20:58, 3.84s/it] 37%|███▋ | 193/520 
[12:21<20:51, 3.83s/it] {'loss': 10.0661, 'grad_norm': 2.351469469091732e-05, 'learning_rate': 5.077292415299809, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:21<20:51, 3.83s/it] 37%|███▋ | 194/520 [12:24<20:36, 3.79s/it] {'loss': 9.3357, 'grad_norm': 3.538605260356259e-05, 'learning_rate': 5.057786245528073, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:24<20:36, 3.79s/it] 38%|███▊ | 195/520 [12:28<20:22, 3.76s/it] {'loss': 9.0818, 'grad_norm': 2.3374567438817725e-05, 'learning_rate': 5.038219549316257, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:28<20:22, 3.76s/it] 38%|███▊ | 196/520 [12:32<20:08, 3.73s/it] {'loss': 9.3257, 'grad_norm': 1.9877893527954988e-05, 'learning_rate': 5.018593086911453, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:32<20:08, 3.73s/it] 38%|███▊ | 197/520 [12:35<20:00, 3.72s/it] {'loss': 8.9495, 'grad_norm': 2.2624407722603423e-05, 'learning_rate': 4.998907620882919, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:35<20:00, 3.72s/it] 38%|███▊ | 198/520 [12:39<20:00, 3.73s/it] {'loss': 9.5049, 'grad_norm': 1.9346994246835802e-05, 'learning_rate': 4.979163916092448, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:39<20:00, 3.73s/it] 38%|███▊ | 199/520 [12:43<19:54, 3.72s/it] {'loss': 9.1796, 'grad_norm': 2.4677220200118936e-05, 'learning_rate': 4.959362739664648, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:43<19:54, 3.72s/it] 38%|███▊ | 200/520 [12:47<19:53, 3.73s/it] {'loss': 9.6928, 'grad_norm': 3.523913317397178e-05, 'learning_rate': 4.9395048609571415, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:47<19:53, 3.73s/it] 39%|███▊ | 201/520 [12:50<19:48, 3.72s/it] {'loss': 9.5197, 'grad_norm': 3.490029519273701e-05, 'learning_rate': 4.919591051530663, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:50<19:48, 3.72s/it] 39%|███▉ | 202/520 [12:54<19:43, 3.72s/it] {'loss': 9.2657, 'grad_norm': 1.7661947513267515e-05, 'learning_rate': 4.899622085119093, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:54<19:43, 3.72s/it] 39%|███▉ | 203/520 [12:58<19:43, 3.73s/it] {'loss': 9.099, 'grad_norm': 2.255413392604692e-05, 'learning_rate': 4.879598737599388, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:58<19:43, 3.73s/it] 39%|███▉ | 204/520 [13:01<19:35, 3.72s/it] {'loss': 9.6495, 'grad_norm': 2.3631982709152152e-05, 'learning_rate': 4.859521786961432, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:01<19:35, 3.72s/it] 39%|███▉ | 205/520 [13:05<19:43, 3.76s/it] {'loss': 9.9804, 'grad_norm': 2.7358379463681744e-05, 'learning_rate': 4.8393920132778145, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:05<19:43, 3.76s/it] 40%|███▉ | 206/520 [13:09<19:37, 3.75s/it] {'loss': 9.6733, 'grad_norm': 1.968886693385456e-05, 'learning_rate': 4.8192101986735185, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:09<19:37, 3.75s/it] 40%|███▉ | 207/520 [13:13<19:31, 3.74s/it] {'loss': 9.9626, 'grad_norm': 3.05637217726849e-05, 'learning_rate': 4.798977127295533, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:13<19:31, 3.74s/it] 40%|████ | 208/520 [13:17<19:31, 3.75s/it] {'loss': 9.1687, 'grad_norm': 2.189607018793906e-05, 'learning_rate': 4.778693585282383, 'epoch': 0.4} + 40%|████ | 208/520 [13:17<19:31, 3.75s/it] 40%|████ | 209/520 [13:20<19:22, 3.74s/it] {'loss': 9.4047, 'grad_norm': 2.0774101447352053e-05, 'learning_rate': 4.758360360733587, 'epoch': 0.4} + 40%|████ | 209/520 [13:20<19:22, 3.74s/it] 40%|████ | 210/520 [13:24<19:20, 3.74s/it] {'loss': 9.2312, 'grad_norm': 1.5098337511274197e-05, 'learning_rate': 4.737978243679035, 'epoch': 0.4} + 40%|████ | 210/520 [13:24<19:20, 3.74s/it] 41%|████ | 211/520 [13:28<19:39, 3.82s/it] {'loss': 9.2761, 'grad_norm': 1.6136893363939736e-05, 
'learning_rate': 4.717548026048295, 'epoch': 0.41} + 41%|████ | 211/520 [13:28<19:39, 3.82s/it] 41%|████ | 212/520 [13:32<19:44, 3.85s/it] {'loss': 8.7609, 'grad_norm': 1.957619660357686e-05, 'learning_rate': 4.697070501639841, 'epoch': 0.41} + 41%|████ | 212/520 [13:32<19:44, 3.85s/it] 41%|████ | 213/520 [13:36<19:50, 3.88s/it] {'loss': 9.6938, 'grad_norm': 2.1072813277474795e-05, 'learning_rate': 4.676546466090208, 'epoch': 0.41} + 41%|████ | 213/520 [13:36<19:50, 3.88s/it] 41%|████ | 214/520 [13:40<19:48, 3.88s/it] {'loss': 9.3442, 'grad_norm': 2.913581411932407e-05, 'learning_rate': 4.655976716843085, 'epoch': 0.41} + 41%|████ | 214/520 [13:40<19:48, 3.88s/it] 41%|████▏ | 215/520 [13:43<19:31, 3.84s/it] {'loss': 9.6695, 'grad_norm': 3.216065811554858e-05, 'learning_rate': 4.6353620531183255, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:43<19:31, 3.84s/it] 42%|████▏ | 216/520 [13:47<19:19, 3.81s/it] {'loss': 9.0616, 'grad_norm': 1.8098414165442598e-05, 'learning_rate': 4.6147032758808955, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:47<19:19, 3.81s/it] 42%|████▏ | 217/520 [13:51<19:08, 3.79s/it] {'loss': 9.1455, 'grad_norm': 2.0306286484261307e-05, 'learning_rate': 4.594001187809756, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:51<19:08, 3.79s/it] 42%|████▏ | 218/520 [13:55<18:55, 3.76s/it] {'loss': 9.5788, 'grad_norm': 1.8145383459891752e-05, 'learning_rate': 4.57325659326667, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:55<18:55, 3.76s/it] 42%|████▏ | 219/520 [13:58<18:47, 3.75s/it] {'loss': 8.7125, 'grad_norm': 2.5565517486968922e-05, 'learning_rate': 4.552470298264955, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:58<18:47, 3.75s/it] 42%|████▏ | 220/520 [14:02<18:40, 3.73s/it] {'loss': 9.9556, 'grad_norm': 3.147256809977448e-05, 'learning_rate': 4.531643110438165, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:02<18:40, 3.73s/it] 42%|████▎ | 221/520 [14:06<18:35, 3.73s/it] {'loss': 9.1236, 'grad_norm': 1.897029096808824e-05, 'learning_rate': 4.510775839008705, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:06<18:35, 3.73s/it] 43%|████▎ | 222/520 [14:10<18:30, 3.73s/it] {'loss': 8.7581, 'grad_norm': 2.296226939791959e-05, 'learning_rate': 4.489869294756396, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:10<18:30, 3.73s/it] 43%|████▎ | 223/520 [14:13<18:23, 3.72s/it] {'loss': 8.809, 'grad_norm': 1.812611042752663e-05, 'learning_rate': 4.4689242899869726, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:13<18:23, 3.72s/it] 43%|████▎ | 224/520 [14:17<18:22, 3.73s/it] {'loss': 11.3106, 'grad_norm': 3.435543247352131e-05, 'learning_rate': 4.447941638500518, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:17<18:22, 3.73s/it] 43%|████▎ | 225/520 [14:21<18:19, 3.73s/it] {'loss': 9.0181, 'grad_norm': 1.6646482959767576e-05, 'learning_rate': 4.426922155559845, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:21<18:19, 3.73s/it] 43%|████▎ | 226/520 [14:24<18:10, 3.71s/it] {'loss': 9.3978, 'grad_norm': 1.6823920265312825e-05, 'learning_rate': 4.405866657858823, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:24<18:10, 3.71s/it] 44%|████▎ | 227/520 [14:28<18:13, 3.73s/it] {'loss': 9.132, 'grad_norm': 1.6939922003763625e-05, 'learning_rate': 4.384775963490641, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:28<18:13, 3.73s/it] 44%|████▍ | 228/520 [14:32<18:21, 3.77s/it] {'loss': 10.5781, 'grad_norm': 2.0502019025685576e-05, 'learning_rate': 4.363650891916027, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:32<18:21, 3.77s/it] 44%|████▍ | 229/520 [14:36<18:34, 3.83s/it] {'loss': 9.1401, 'grad_norm': 1.7749326410678365e-05, 'learning_rate': 4.342492263931406, 'epoch': 0.44} + 
44%|████▍ | 229/520 [14:36<18:34, 3.83s/it] 44%|████▍ | 230/520 [14:40<18:35, 3.85s/it] {'loss': 9.1821, 'grad_norm': 2.993550297080935e-05, 'learning_rate': 4.321300901637004, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:40<18:35, 3.85s/it] 44%|████▍ | 231/520 [14:44<18:24, 3.82s/it] {'loss': 9.3304, 'grad_norm': 1.8478604196473914e-05, 'learning_rate': 4.300077628404914, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:44<18:24, 3.82s/it] 45%|████▍ | 232/520 [14:47<18:12, 3.79s/it] {'loss': 10.7862, 'grad_norm': 2.412461306285707e-05, 'learning_rate': 4.2788232688471, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:47<18:12, 3.79s/it] 45%|████▍ | 233/520 [14:51<18:05, 3.78s/it] {'loss': 10.0952, 'grad_norm': 3.1128943903813445e-05, 'learning_rate': 4.25753864878336, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:51<18:05, 3.78s/it] 45%|████▌ | 234/520 [14:55<18:01, 3.78s/it] {'loss': 8.8104, 'grad_norm': 1.5273177657383854e-05, 'learning_rate': 4.236224595209236, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:55<18:01, 3.78s/it] 45%|████▌ | 235/520 [14:59<17:50, 3.76s/it] {'loss': 9.1866, 'grad_norm': 1.6064621248572643e-05, 'learning_rate': 4.214881936263882, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:59<17:50, 3.76s/it] 45%|████▌ | 236/520 [15:02<17:43, 3.74s/it] {'loss': 9.9154, 'grad_norm': 2.9308275390227845e-05, 'learning_rate': 4.193511501197891, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:02<17:43, 3.74s/it] 46%|████▌ | 237/520 [15:06<17:41, 3.75s/it] {'loss': 9.4103, 'grad_norm': 2.247924709285742e-05, 'learning_rate': 4.172114120341077, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:06<17:41, 3.75s/it] 46%|████▌ | 238/520 [15:10<17:37, 3.75s/it] {'loss': 9.0226, 'grad_norm': 1.827799160789137e-05, 'learning_rate': 4.150690625070202, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:10<17:37, 3.75s/it] 46%|████▌ | 239/520 [15:14<17:30, 3.74s/it] {'loss': 9.8347, 'grad_norm': 2.504968868646699e-05, 'learning_rate': 4.129241847776685, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:14<17:30, 3.74s/it] 46%|████▌ | 240/520 [15:17<17:25, 3.73s/it] {'loss': 8.5859, 'grad_norm': 2.445239652209854e-05, 'learning_rate': 4.107768621834257, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:17<17:25, 3.73s/it] 46%|████▋ | 241/520 [15:21<17:20, 3.73s/it] {'loss': 8.9587, 'grad_norm': 1.7066163365956155e-05, 'learning_rate': 4.086271781566578, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:21<17:20, 3.73s/it] 47%|████▋ | 242/520 [15:25<17:15, 3.72s/it] {'loss': 9.1963, 'grad_norm': 1.4266470268198455e-05, 'learning_rate': 4.064752162214823, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:25<17:15, 3.72s/it] 47%|████▋ | 243/520 [15:28<17:14, 3.73s/it] {'loss': 8.7944, 'grad_norm': 1.771787606461237e-05, 'learning_rate': 4.043210599905231, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:28<17:14, 3.73s/it] 47%|████▋ | 244/520 [15:32<17:08, 3.73s/it] {'loss': 9.5021, 'grad_norm': 1.518141118671091e-05, 'learning_rate': 4.02164793161661, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:32<17:08, 3.73s/it] 47%|████▋ | 245/520 [15:36<17:15, 3.77s/it] {'loss': 8.8422, 'grad_norm': 4.790153581437182e-05, 'learning_rate': 4.00006499514783, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:36<17:15, 3.77s/it] 47%|████▋ | 246/520 [15:40<17:24, 3.81s/it] {'loss': 10.4231, 'grad_norm': 2.3710415450787296e-05, 'learning_rate': 3.978462629085257, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:40<17:24, 3.81s/it] 48%|████▊ | 247/520 [15:44<17:25, 3.83s/it] {'loss': 9.9076, 'grad_norm': 2.2315259331517604e-05, 'learning_rate': 3.956841672770181, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:44<17:25, 3.83s/it] 48%|████▊ | 
248/520 [15:48<17:34, 3.88s/it] {'loss': 8.9143, 'grad_norm': 2.4859940602553433e-05, 'learning_rate': 3.935202966266199, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:48<17:34, 3.88s/it] 48%|████▊ | 249/520 [15:52<17:55, 3.97s/it] {'loss': 9.5374, 'grad_norm': 1.7683270785575854e-05, 'learning_rate': 3.913547350326575, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:52<17:55, 3.97s/it] 48%|████▊ | 250/520 [15:56<18:10, 4.04s/it] {'loss': 9.4276, 'grad_norm': 2.4957472368402374e-05, 'learning_rate': 3.8918756663615772, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:56<18:10, 4.04s/it] 48%|████▊ | 251/520 [16:00<18:01, 4.02s/it] {'loss': 9.7129, 'grad_norm': 1.7635715377153365e-05, 'learning_rate': 3.8701887564057826, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:00<18:01, 4.02s/it] 48%|████▊ | 252/520 [16:04<17:45, 3.97s/it] {'loss': 10.0488, 'grad_norm': 2.3335986982573807e-05, 'learning_rate': 3.8484874630853585, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:04<17:45, 3.97s/it] 49%|████▊ | 253/520 [16:08<17:37, 3.96s/it] {'loss': 9.7462, 'grad_norm': 2.6865909145955152e-05, 'learning_rate': 3.826772629585327, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:08<17:37, 3.96s/it] 49%|████▉ | 254/520 [16:12<17:33, 3.96s/it] {'loss': 9.0308, 'grad_norm': 1.8065053762099438e-05, 'learning_rate': 3.8050450996168044, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:12<17:33, 3.96s/it] 49%|████▉ | 255/520 [16:16<17:44, 4.02s/it] {'loss': 9.327, 'grad_norm': 1.7705741559393058e-05, 'learning_rate': 3.783305717384212, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:16<17:44, 4.02s/it] 49%|████▉ | 256/520 [16:20<17:16, 3.93s/it] {'loss': 9.3573, 'grad_norm': 1.5797817711955065e-05, 'learning_rate': 3.761555327552485, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:20<17:16, 3.93s/it] 49%|████▉ | 257/520 [16:23<16:50, 3.84s/it] {'loss': 9.3157, 'grad_norm': 1.1787627871032087e-05, 'learning_rate': 3.739794775214248, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:23<16:50, 3.84s/it] 50%|████▉ | 258/520 [16:27<16:38, 3.81s/it] {'loss': 9.2917, 'grad_norm': 1.3039740152575197e-05, 'learning_rate': 3.718024905856983, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:27<16:38, 3.81s/it] 50%|████▉ | 259/520 [16:31<16:26, 3.78s/it] {'loss': 9.7672, 'grad_norm': 1.4054805228147664e-05, 'learning_rate': 3.6962465653301715, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:31<16:26, 3.78s/it] 50%|█████ | 260/520 [16:35<16:21, 3.78s/it] {'loss': 10.0923, 'grad_norm': 1.6823616338378126e-05, 'learning_rate': 3.67446059981244, 'epoch': 0.5} + 50%|█████ | 260/520 [16:35<16:21, 3.78s/it] 50%|█████ | 261/520 [16:38<16:15, 3.77s/it] {'loss': 10.212, 'grad_norm': 1.9048541331679906e-05, 'learning_rate': 3.6526678557786765, 'epoch': 0.5} + 50%|█████ | 261/520 [16:38<16:15, 3.77s/it] 50%|█████ | 262/520 [16:42<16:06, 3.74s/it] {'loss': 9.1762, 'grad_norm': 1.640526903714014e-05, 'learning_rate': 3.6308691799671404, 'epoch': 0.5} + 50%|█████ | 262/520 [16:42<16:06, 3.74s/it] 51%|█████ | 263/520 [16:46<15:59, 3.73s/it] {'loss': 10.2999, 'grad_norm': 1.4810495888112959e-05, 'learning_rate': 3.609065419346566, 'epoch': 0.51} + 51%|█████ | 263/520 [16:46<15:59, 3.73s/it] 51%|█████ | 264/520 [16:50<15:58, 3.74s/it] {'loss': 9.5479, 'grad_norm': 1.654359461004568e-05, 'learning_rate': 3.5872574210832555, 'epoch': 0.51} + 51%|█████ | 264/520 [16:50<15:58, 3.74s/it] 51%|█████ | 265/520 [16:53<15:51, 3.73s/it] {'loss': 9.4104, 'grad_norm': 2.9816997203916996e-05, 'learning_rate': 3.5654460325081576, 'epoch': 0.51} + 51%|█████ | 265/520 [16:53<15:51, 3.73s/it] 51%|█████ | 266/520 [16:57<15:46, 3.72s/it] {'loss': 
8.312, 'grad_norm': 3.843592557508204e-05, 'learning_rate': 3.543632101083953, 'epoch': 0.51} + 51%|█████ | 266/520 [16:57<15:46, 3.72s/it] 51%|█████▏ | 267/520 [17:01<15:39, 3.71s/it] {'loss': 9.0989, 'grad_norm': 1.731782921986284e-05, 'learning_rate': 3.5218164743721174, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:01<15:39, 3.71s/it] 52%|█████▏ | 268/520 [17:04<15:35, 3.71s/it] {'loss': 10.681, 'grad_norm': 1.9168924107180426e-05, 'learning_rate': 3.5, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:04<15:35, 3.71s/it] 52%|█████▏ | 269/520 [17:08<15:33, 3.72s/it] {'loss': 9.5397, 'grad_norm': 2.3102743297153923e-05, 'learning_rate': 3.4781835256278826, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:08<15:33, 3.72s/it] 52%|█████▏ | 270/520 [17:12<15:27, 3.71s/it] {'loss': 9.6204, 'grad_norm': 3.5582483466173006e-05, 'learning_rate': 3.4563678989160476, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:12<15:27, 3.71s/it] 52%|█████▏ | 271/520 [17:15<15:20, 3.70s/it] {'loss': 9.9512, 'grad_norm': 2.0050976342430365e-05, 'learning_rate': 3.434553967491843, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:15<15:20, 3.70s/it] 52%|█████▏ | 272/520 [17:19<15:17, 3.70s/it] {'loss': 10.2982, 'grad_norm': 2.126740647024766e-05, 'learning_rate': 3.4127425789167454, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:19<15:17, 3.70s/it] 52%|█████▎ | 273/520 [17:23<15:10, 3.69s/it] {'loss': 10.4045, 'grad_norm': 2.582824467881023e-05, 'learning_rate': 3.390934580653435, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:23<15:10, 3.69s/it] 53%|█████▎ | 274/520 [17:26<15:05, 3.68s/it] {'loss': 9.0483, 'grad_norm': 2.161799240410538e-05, 'learning_rate': 3.3691308200328605, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:26<15:05, 3.68s/it] 53%|█████▎ | 275/520 [17:30<15:01, 3.68s/it] {'loss': 9.2896, 'grad_norm': 3.676563643434098e-05, 'learning_rate': 3.3473321442213244, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:30<15:01, 3.68s/it] 53%|█████▎ | 276/520 [17:34<14:55, 3.67s/it] {'loss': 9.7842, 'grad_norm': 1.6062766347826363e-05, 'learning_rate': 3.3255394001875596, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:34<14:55, 3.67s/it] 53%|█████▎ | 277/520 [17:37<14:55, 3.69s/it] {'loss': 10.3864, 'grad_norm': 1.738799302315625e-05, 'learning_rate': 3.3037534346698285, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:38<14:55, 3.69s/it] 53%|█████▎ | 278/520 [17:41<14:51, 3.68s/it] {'loss': 8.4722, 'grad_norm': 5.158834095721124e-05, 'learning_rate': 3.2819750941430175, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:41<14:51, 3.68s/it] 54%|█████▎ | 279/520 [17:45<14:47, 3.68s/it] {'loss': 9.7186, 'grad_norm': 5.2308201054758706e-05, 'learning_rate': 3.260205224785752, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:45<14:47, 3.68s/it] 54%|█████▍ | 280/520 [17:49<14:46, 3.69s/it] {'loss': 9.0798, 'grad_norm': 3.553600132894559e-05, 'learning_rate': 3.2384446724475153, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:49<14:46, 3.69s/it] 54%|█████▍ | 281/520 [17:52<14:40, 3.68s/it] {'loss': 9.5349, 'grad_norm': 1.6065460602698838e-05, 'learning_rate': 3.216694282615788, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:52<14:40, 3.68s/it] 54%|█████▍ | 282/520 [17:56<14:39, 3.70s/it] {'loss': 8.5943, 'grad_norm': 2.40356548131746e-05, 'learning_rate': 3.194954900383196, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:56<14:39, 3.70s/it] 54%|█████▍ | 283/520 [18:00<14:35, 3.70s/it] {'loss': 9.6874, 'grad_norm': 1.4871683006017533e-05, 'learning_rate': 3.173227370414673, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:00<14:35, 3.70s/it] 55%|█████▍ | 284/520 [18:03<14:32, 3.70s/it] {'loss': 10.0424, 'grad_norm': 
2.181780639917976e-05, 'learning_rate': 3.151512536914642, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:03<14:32, 3.70s/it] 55%|█████▍ | 285/520 [18:07<14:28, 3.70s/it] {'loss': 9.0782, 'grad_norm': 1.685556779157706e-05, 'learning_rate': 3.1298112435942183, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:07<14:28, 3.70s/it] 55%|█████▌ | 286/520 [18:11<14:23, 3.69s/it] {'loss': 8.8366, 'grad_norm': 2.5653079958186997e-05, 'learning_rate': 3.1081243336384228, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:11<14:23, 3.69s/it] 55%|█████▌ | 287/520 [18:14<14:16, 3.68s/it] {'loss': 9.2729, 'grad_norm': 1.6797978039488023e-05, 'learning_rate': 3.0864526496734253, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:14<14:16, 3.68s/it] 55%|█████▌ | 288/520 [18:18<14:15, 3.69s/it] {'loss': 10.0362, 'grad_norm': 2.2001083027354006e-05, 'learning_rate': 3.064797033733803, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:18<14:15, 3.69s/it] 56%|█████▌ | 289/520 [18:22<14:17, 3.71s/it] {'loss': 9.1526, 'grad_norm': 1.7971985301317797e-05, 'learning_rate': 3.04315832722982, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:22<14:17, 3.71s/it] 56%|█████▌ | 290/520 [18:26<14:22, 3.75s/it] {'loss': 8.7822, 'grad_norm': 2.0247263821633184e-05, 'learning_rate': 3.0215373709147437, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:26<14:22, 3.75s/it] 56%|█████▌ | 291/520 [18:29<14:12, 3.72s/it] {'loss': 8.8763, 'grad_norm': 2.467359912151135e-05, 'learning_rate': 2.9999350048521705, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:29<14:12, 3.72s/it] 56%|█████▌ | 292/520 [18:33<14:04, 3.70s/it] {'loss': 9.5006, 'grad_norm': 2.5991848284158434e-05, 'learning_rate': 2.978352068383389, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:33<14:04, 3.70s/it] 56%|█████▋ | 293/520 [18:37<13:59, 3.70s/it] {'loss': 9.0676, 'grad_norm': 1.4534009961871168e-05, 'learning_rate': 2.9567894000947694, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:37<13:59, 3.70s/it] 57%|█████▋ | 294/520 [18:40<13:53, 3.69s/it] {'loss': 9.4765, 'grad_norm': 1.4273241671684228e-05, 'learning_rate': 2.9352478377851767, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:40<13:53, 3.69s/it] 57%|█████▋ | 295/520 [18:44<13:48, 3.68s/it] {'loss': 10.2811, 'grad_norm': 1.4985757844965954e-05, 'learning_rate': 2.913728218433423, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:44<13:48, 3.68s/it] 57%|█████▋ | 296/520 [18:48<13:47, 3.69s/it] {'loss': 8.6171, 'grad_norm': 1.7378145628850215e-05, 'learning_rate': 2.892231378165744, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:48<13:47, 3.69s/it] 57%|█████▋ | 297/520 [18:51<13:41, 3.68s/it] {'loss': 9.5866, 'grad_norm': 1.5645320181234594e-05, 'learning_rate': 2.8707581522233157, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:51<13:41, 3.68s/it] 57%|█████▋ | 298/520 [18:55<13:37, 3.68s/it] {'loss': 9.2265, 'grad_norm': 2.2728017497185006e-05, 'learning_rate': 2.849309374929799, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:55<13:37, 3.68s/it] 57%|█████▊ | 299/520 [18:59<13:33, 3.68s/it] {'loss': 10.2854, 'grad_norm': 2.220277220762316e-05, 'learning_rate': 2.8278858796589237, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:59<13:33, 3.68s/it] 58%|█████▊ | 300/520 [19:02<13:32, 3.69s/it] {'loss': 9.6062, 'grad_norm': 1.6374046368502482e-05, 'learning_rate': 2.8064884988021093, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:02<13:32, 3.69s/it] 58%|█████▊ | 301/520 [19:06<13:39, 3.74s/it] {'loss': 9.3433, 'grad_norm': 1.7972516498561406e-05, 'learning_rate': 2.7851180637361193, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:06<13:39, 3.74s/it] 58%|█████▊ | 302/520 [19:10<13:37, 3.75s/it] {'loss': 10.2579, 
'grad_norm': 2.2754218106966325e-05, 'learning_rate': 2.7637754047907652, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:10<13:37, 3.75s/it] 58%|█████▊ | 303/520 [19:14<13:33, 3.75s/it] {'loss': 9.0021, 'grad_norm': 1.2175400860049318e-05, 'learning_rate': 2.7424613512166403, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:14<13:33, 3.75s/it] 58%|█████▊ | 304/520 [19:18<13:30, 3.75s/it] {'loss': 10.1101, 'grad_norm': 2.295455796378141e-05, 'learning_rate': 2.7211767311529, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:18<13:30, 3.75s/it] 59%|█████▊ | 305/520 [19:21<13:22, 3.73s/it] {'loss': 9.8874, 'grad_norm': 1.4405679573197545e-05, 'learning_rate': 2.699922371595087, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:21<13:22, 3.73s/it] 59%|█████▉ | 306/520 [19:25<13:21, 3.74s/it] {'loss': 9.6347, 'grad_norm': 1.5938827920916278e-05, 'learning_rate': 2.6786990983629977, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:25<13:21, 3.74s/it] 59%|█████▉ | 307/520 [19:29<13:24, 3.78s/it] {'loss': 9.2218, 'grad_norm': 1.7434203913229526e-05, 'learning_rate': 2.6575077360685952, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:29<13:24, 3.78s/it] 59%|█████▉ | 308/520 [19:33<13:14, 3.75s/it] {'loss': 9.4742, 'grad_norm': 1.9076267935100493e-05, 'learning_rate': 2.636349108083972, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:33<13:14, 3.75s/it] 59%|█████▉ | 309/520 [19:37<13:33, 3.86s/it] {'loss': 9.0295, 'grad_norm': 1.2692768667872163e-05, 'learning_rate': 2.615224036509358, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:37<13:33, 3.86s/it] 60%|█████▉ | 310/520 [19:40<13:21, 3.82s/it] {'loss': 9.132, 'grad_norm': 1.2090942838630056e-05, 'learning_rate': 2.594133342141177, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:40<13:21, 3.82s/it] 60%|█████▉ | 311/520 [19:44<13:09, 3.78s/it] {'loss': 9.2888, 'grad_norm': 1.2336640594219958e-05, 'learning_rate': 2.5730778444401543, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:44<13:09, 3.78s/it] 60%|██████ | 312/520 [19:48<13:07, 3.79s/it] {'loss': 8.9947, 'grad_norm': 1.4156474554496615e-05, 'learning_rate': 2.5520583614994825, 'epoch': 0.6} + 60%|██████ | 312/520 [19:48<13:07, 3.79s/it] 60%|██████ | 313/520 [19:52<13:00, 3.77s/it] {'loss': 8.3227, 'grad_norm': 2.457162140486539e-05, 'learning_rate': 2.5310757100130274, 'epoch': 0.6} + 60%|██████ | 313/520 [19:52<13:00, 3.77s/it] 60%|██████ | 314/520 [19:56<13:14, 3.86s/it] {'loss': 9.1651, 'grad_norm': 1.3730066147883641e-05, 'learning_rate': 2.5101307052436037, 'epoch': 0.6} + 60%|██████ | 314/520 [19:56<13:14, 3.86s/it] 61%|██████ | 315/520 [19:59<13:01, 3.81s/it] {'loss': 10.3662, 'grad_norm': 1.9679882772581386e-05, 'learning_rate': 2.489224160991296, 'epoch': 0.61} + 61%|██████ | 315/520 [19:59<13:01, 3.81s/it] 61%|██████ | 316/520 [20:04<13:28, 3.97s/it] {'loss': 9.0142, 'grad_norm': 1.66005372890534e-05, 'learning_rate': 2.468356889561835, 'epoch': 0.61} + 61%|██████ | 316/520 [20:04<13:28, 3.97s/it] 61%|██████ | 317/520 [20:08<13:22, 3.95s/it] {'loss': 8.4252, 'grad_norm': 1.9738975612651253e-05, 'learning_rate': 2.4475297017350446, 'epoch': 0.61} + 61%|██████ | 317/520 [20:08<13:22, 3.95s/it] 61%|██████ | 318/520 [20:12<13:15, 3.94s/it] {'loss': 9.9137, 'grad_norm': 1.4775147003525103e-05, 'learning_rate': 2.4267434067333307, 'epoch': 0.61} + 61%|██████ | 318/520 [20:12<13:15, 3.94s/it] 61%|██████▏ | 319/520 [20:16<13:25, 4.01s/it] {'loss': 8.8518, 'grad_norm': 2.9543237936381252e-05, 'learning_rate': 2.4059988121902447, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:16<13:25, 4.01s/it] 62%|██████▏ | 320/520 [20:19<13:04, 3.92s/it] {'loss': 9.1891, 
'grad_norm': 1.850357793981783e-05, 'learning_rate': 2.385296724119105, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:19<13:04, 3.92s/it] 62%|██████▏ | 321/520 [20:23<12:47, 3.86s/it] {'loss': 9.3562, 'grad_norm': 1.9381493974418542e-05, 'learning_rate': 2.3646379468816754, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:23<12:47, 3.86s/it] 62%|██████▏ | 322/520 [20:27<12:35, 3.82s/it] {'loss': 10.0868, 'grad_norm': 2.3951442081428232e-05, 'learning_rate': 2.3440232831569165, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:27<12:35, 3.82s/it] 62%|██████▏ | 323/520 [20:31<12:25, 3.78s/it] {'loss': 10.1341, 'grad_norm': 1.9654236822890995e-05, 'learning_rate': 2.323453533909793, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:31<12:25, 3.78s/it] 62%|██████▏ | 324/520 [20:34<12:15, 3.75s/it] {'loss': 9.1079, 'grad_norm': 2.383902435556213e-05, 'learning_rate': 2.3029294983601596, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:34<12:15, 3.75s/it] 62%|██████▎ | 325/520 [20:38<12:09, 3.74s/it] {'loss': 9.4374, 'grad_norm': 2.3886256938105227e-05, 'learning_rate': 2.282451973951704, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:38<12:09, 3.74s/it] 63%|██████▎ | 326/520 [20:42<12:01, 3.72s/it] {'loss': 9.4785, 'grad_norm': 1.5011457219835847e-05, 'learning_rate': 2.2620217563209652, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:42<12:01, 3.72s/it] 63%|██████▎ | 327/520 [20:45<11:56, 3.71s/it] {'loss': 10.1857, 'grad_norm': 2.1437604378968457e-05, 'learning_rate': 2.2416396392664137, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:45<11:56, 3.71s/it] 63%|██████▎ | 328/520 [20:49<11:52, 3.71s/it] {'loss': 9.6887, 'grad_norm': 1.4975142188789698e-05, 'learning_rate': 2.2213064147176174, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:49<11:52, 3.71s/it] 63%|██████▎ | 329/520 [20:53<11:47, 3.71s/it] {'loss': 8.6675, 'grad_norm': 2.049378746074687e-05, 'learning_rate': 2.2010228727044674, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:53<11:47, 3.71s/it] 63%|██████▎ | 330/520 [20:57<11:46, 3.72s/it] {'loss': 9.3662, 'grad_norm': 1.237372692789009e-05, 'learning_rate': 2.1807898013264815, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:57<11:46, 3.72s/it] 64%|██████▎ | 331/520 [21:00<11:41, 3.71s/it] {'loss': 9.2912, 'grad_norm': 1.4931834967978854e-05, 'learning_rate': 2.160607986722186, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:00<11:41, 3.71s/it] 64%|██████▍ | 332/520 [21:04<11:39, 3.72s/it] {'loss': 10.1922, 'grad_norm': 2.469175490637177e-05, 'learning_rate': 2.1404782130385684, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:04<11:39, 3.72s/it] 64%|██████▍ | 333/520 [21:08<11:34, 3.71s/it] {'loss': 9.936, 'grad_norm': 1.6458160611140585e-05, 'learning_rate': 2.1204012624006126, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:08<11:34, 3.71s/it] 64%|██████▍ | 334/520 [21:11<11:30, 3.71s/it] {'loss': 9.1497, 'grad_norm': 1.609847714533789e-05, 'learning_rate': 2.100377914880907, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:11<11:30, 3.71s/it] 64%|██████▍ | 335/520 [21:15<11:25, 3.71s/it] {'loss': 9.2534, 'grad_norm': 1.6237097020765385e-05, 'learning_rate': 2.080408948469338, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:15<11:25, 3.71s/it] 65%|██████▍ | 336/520 [21:19<11:23, 3.71s/it] {'loss': 9.0602, 'grad_norm': 1.823696887751274e-05, 'learning_rate': 2.0604951390428603, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:19<11:23, 3.71s/it] 65%|██████▍ | 337/520 [21:22<11:19, 3.71s/it] {'loss': 9.2501, 'grad_norm': 1.3625578704285707e-05, 'learning_rate': 2.0406372603353526, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:23<11:19, 3.71s/it] 65%|██████▌ | 338/520 
[21:26<11:19, 3.73s/it] {'loss': 9.2351, 'grad_norm': 1.0929681434833453e-05, 'learning_rate': 2.0208360839075525, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:26<11:19, 3.73s/it] 65%|██████▌ | 339/520 [21:30<11:14, 3.72s/it] {'loss': 9.545, 'grad_norm': 1.095907016524604e-05, 'learning_rate': 2.00109237911708, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:30<11:14, 3.72s/it] 65%|██████▌ | 340/520 [21:34<11:08, 3.72s/it] {'loss': 9.0095, 'grad_norm': 1.1314897977122304e-05, 'learning_rate': 1.9814069130885468, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:34<11:08, 3.72s/it] 66%|██████▌ | 341/520 [21:37<11:04, 3.71s/it] {'loss': 9.2867, 'grad_norm': 1.2441585543220027e-05, 'learning_rate': 1.9617804506837422, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:37<11:04, 3.71s/it] 66%|██████▌ | 342/520 [21:41<10:58, 3.70s/it] {'loss': 10.441, 'grad_norm': 1.24849503299522e-05, 'learning_rate': 1.9422137544719265, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:41<10:58, 3.70s/it] 66%|██████▌ | 343/520 [21:45<10:53, 3.69s/it] {'loss': 9.984, 'grad_norm': 1.6230119920779543e-05, 'learning_rate': 1.922707584700191, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:45<10:53, 3.69s/it] 66%|██████▌ | 344/520 [21:48<10:48, 3.69s/it] {'loss': 8.966, 'grad_norm': 1.4203322810339107e-05, 'learning_rate': 1.9032626992639294, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:48<10:48, 3.69s/it] 66%|██████▋ | 345/520 [21:52<10:43, 3.68s/it] {'loss': 9.3697, 'grad_norm': 9.681770044819157e-06, 'learning_rate': 1.883879853677382, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:52<10:43, 3.68s/it] 67%|██████▋ | 346/520 [21:56<10:41, 3.69s/it] {'loss': 10.1482, 'grad_norm': 1.995392472577551e-05, 'learning_rate': 1.8645598010442828, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:56<10:41, 3.69s/it] 67%|██████▋ | 347/520 [21:59<10:39, 3.70s/it] {'loss': 8.4853, 'grad_norm': 1.8648077066021403e-05, 'learning_rate': 1.845303292028606, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:59<10:39, 3.70s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). 
Running this sequence through the model will result in indexing errors
[training-progress residue, steps 348-429 of 520, elapsed 22:03-27:06 at ~3.7-3.8 s/it: per-step loss/grad_norm/learning_rate/epoch records; loss ~8.28-11.21 with no clear downward trend, grad_norm ~7.2e-06-3.8e-05, learning_rate decays from 1.826 to 0.548, epoch 0.67 → 0.82]
Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048).
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:09<05:34, 3.72s/it] {'loss': 8.438, 'grad_norm': 1.2901937216190119e-05, 'learning_rate': 0.5364653027010056, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:09<05:34, 3.72s/it] 83%|████████▎ | 431/520 [27:13<05:31, 3.72s/it] {'loss': 10.0725, 'grad_norm': 8.884768913720042e-06, 'learning_rate': 0.5249158113300181, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:13<05:31, 3.72s/it] 83%|████████▎ | 432/520 [27:17<05:28, 3.73s/it] {'loss': 8.7987, 'grad_norm': 8.496516162519537e-06, 'learning_rate': 0.5134819142874554, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:17<05:28, 3.73s/it] 83%|████████▎ | 433/520 [27:21<05:24, 3.73s/it] {'loss': 9.2012, 'grad_norm': 8.896631317790164e-06, 'learning_rate': 0.5021640558275203, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:21<05:24, 3.73s/it] 83%|████████▎ | 434/520 [27:24<05:20, 3.72s/it] {'loss': 8.1362, 'grad_norm': 2.3813334259919972e-05, 'learning_rate': 0.49096267569583396, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:24<05:20, 3.72s/it] 84%|████████▎ | 435/520 [27:28<05:15, 3.72s/it] {'loss': 9.5188, 'grad_norm': 1.4166762382898036e-05, 'learning_rate': 0.47987820911235435, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:28<05:15, 3.72s/it] 84%|████████▍ | 436/520 [27:32<05:12, 3.72s/it] {'loss': 8.8727, 'grad_norm': 1.9831287458177867e-05, 'learning_rate': 0.4689110867544645, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:32<05:12, 3.72s/it] 84%|████████▍ | 437/520 [27:36<05:09, 3.73s/it] {'loss': 9.8077, 'grad_norm': 2.1970905816657672e-05, 'learning_rate': 0.4580617347402376, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:36<05:09, 3.73s/it] 84%|████████▍ | 438/520 [27:39<05:08, 3.77s/it] {'loss': 8.498, 'grad_norm': 2.47203584849486e-05, 'learning_rate': 0.44733057461188136, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:39<05:08, 3.77s/it] 84%|████████▍ | 439/520 [27:43<05:07, 3.79s/it] {'loss': 9.586, 'grad_norm': 1.7544514904088022e-05, 'learning_rate': 0.4367180233193621, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:43<05:07, 3.79s/it] 85%|████████▍ | 440/520 [27:47<05:06, 3.83s/it] {'loss': 9.2259, 'grad_norm': 1.2644864542025371e-05, 'learning_rate': 0.4262244932041997, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:47<05:06, 3.83s/it] 85%|████████▍ | 441/520 [27:51<05:04, 3.85s/it] {'loss': 9.9265, 'grad_norm': 1.725786874459561e-05, 'learning_rate': 0.4158503919834516, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:51<05:04, 3.85s/it] 85%|████████▌ | 442/520 [27:55<05:01, 3.86s/it] {'loss': 9.2026, 'grad_norm': 1.9982492585625375e-05, 'learning_rate': 0.4055961227338662, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:55<05:01, 3.86s/it] 85%|████████▌ | 443/520 [27:59<04:54, 3.83s/it] {'loss': 9.2625, 'grad_norm': 2.254982824496817e-05, 'learning_rate': 0.395462083876224, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:59<04:54, 3.83s/it] 85%|████████▌ | 444/520 [28:02<04:48, 3.80s/it] {'loss': 9.1141, 'grad_norm': 1.9910282287874422e-05, 'learning_rate': 0.3854486691598601, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:02<04:48, 3.80s/it] 86%|████████▌ | 445/520 [28:06<04:43, 3.77s/it] {'loss': 8.8395, 'grad_norm': 1.550041490031129e-05, 'learning_rate': 0.3755562676473604, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:06<04:43, 3.77s/it] 86%|████████▌ | 446/520 [28:10<04:38, 3.76s/it] {'loss': 10.1054, 'grad_norm': 1.4868593695052476e-05, 'learning_rate': 0.36578526369944675, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:10<04:38, 3.76s/it] 86%|████████▌ | 
447/520 [28:14<04:33, 3.74s/it] {'loss': 9.6812, 'grad_norm': 1.0463937988206918e-05, 'learning_rate': 0.35613603696004587, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:14<04:33, 3.74s/it] 86%|████████▌ | 448/520 [28:17<04:29, 3.74s/it] {'loss': 8.9973, 'grad_norm': 6.825496496898679e-06, 'learning_rate': 0.3466089623415334, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:17<04:29, 3.74s/it] 86%|████████▋ | 449/520 [28:21<04:25, 3.75s/it] {'loss': 10.1862, 'grad_norm': 1.3414458726715012e-05, 'learning_rate': 0.3372044100101723, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:21<04:25, 3.75s/it] 87%|████████▋ | 450/520 [28:25<04:21, 3.74s/it] {'loss': 9.4674, 'grad_norm': 9.060954486494624e-06, 'learning_rate': 0.3279227453717252, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:25<04:21, 3.74s/it] 87%|████████▋ | 451/520 [28:29<04:18, 3.74s/it] {'loss': 9.6159, 'grad_norm': 9.191022794471531e-06, 'learning_rate': 0.3187643290572617, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:29<04:18, 3.74s/it] 87%|████████▋ | 452/520 [28:32<04:13, 3.73s/it] {'loss': 10.0154, 'grad_norm': 1.3428839704231454e-05, 'learning_rate': 0.309729516909144, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:32<04:13, 3.73s/it] 87%|████████▋ | 453/520 [28:36<04:10, 3.74s/it] {'loss': 10.2303, 'grad_norm': 1.3567631045118098e-05, 'learning_rate': 0.3008186599671995, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:36<04:10, 3.74s/it] 87%|████████▋ | 454/520 [28:40<04:06, 3.73s/it] {'loss': 8.9617, 'grad_norm': 1.042112327698818e-05, 'learning_rate': 0.2920321044550833, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:40<04:06, 3.73s/it] 88%|████████▊ | 455/520 [28:43<04:02, 3.72s/it] {'loss': 9.3402, 'grad_norm': 1.1581286717176857e-05, 'learning_rate': 0.2833701917668277, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:43<04:02, 3.72s/it] 88%|████████▊ | 456/520 [28:47<03:58, 3.73s/it] {'loss': 8.904, 'grad_norm': 1.0808041678067827e-05, 'learning_rate': 0.2748332584535729, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:47<03:58, 3.73s/it] 88%|████████▊ | 457/520 [28:51<03:55, 3.74s/it] {'loss': 10.9525, 'grad_norm': 2.698784006497515e-05, 'learning_rate': 0.2664216362104964, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:51<03:55, 3.74s/it] 88%|████████▊ | 458/520 [28:55<03:51, 3.73s/it] {'loss': 9.6903, 'grad_norm': 1.2801253088730725e-05, 'learning_rate': 0.25813565186391974, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:55<03:51, 3.73s/it] 88%|████████▊ | 459/520 [28:58<03:48, 3.74s/it] {'loss': 9.4212, 'grad_norm': 1.147848519956329e-05, 'learning_rate': 0.24997562735861256, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:58<03:48, 3.74s/it] 88%|████████▊ | 460/520 [29:02<03:44, 3.73s/it] {'loss': 8.8762, 'grad_norm': 1.2808389708988578e-05, 'learning_rate': 0.24194187974528553, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:02<03:44, 3.73s/it] 89%|████████▊ | 461/520 [29:06<03:41, 3.75s/it] {'loss': 10.902, 'grad_norm': 2.7706417986635452e-05, 'learning_rate': 0.23403472116826723, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:06<03:41, 3.75s/it] 89%|████████▉ | 462/520 [29:10<03:42, 3.83s/it] {'loss': 10.5794, 'grad_norm': 2.0543477613472212e-05, 'learning_rate': 0.22625445885338102, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:10<03:42, 3.83s/it] 89%|████████▉ | 463/520 [29:14<03:42, 3.90s/it] {'loss': 9.0665, 'grad_norm': 1.5973569376732233e-05, 'learning_rate': 0.21860139509600318, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:14<03:42, 3.90s/it] 89%|████████▉ | 464/520 [29:18<03:38, 3.90s/it] {'loss': 9.7726, 'grad_norm': 
1.299429610180082e-05, 'learning_rate': 0.2110758272493209, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:18<03:38, 3.90s/it] 89%|████████▉ | 465/520 [29:22<03:32, 3.86s/it] {'loss': 10.1302, 'grad_norm': 1.9609807302431493e-05, 'learning_rate': 0.20367804771277787, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:22<03:32, 3.86s/it] 90%|████████▉ | 466/520 [29:25<03:25, 3.81s/it] {'loss': 9.1812, 'grad_norm': 1.3177414241975787e-05, 'learning_rate': 0.1964083439207135, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:25<03:25, 3.81s/it] 90%|████████▉ | 467/520 [29:29<03:20, 3.78s/it] {'loss': 10.1757, 'grad_norm': 1.482000081007769e-05, 'learning_rate': 0.18926699833119393, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:29<03:20, 3.78s/it] 90%|█████████ | 468/520 [29:33<03:14, 3.75s/it] {'loss': 9.6216, 'grad_norm': 1.6966730572688198e-05, 'learning_rate': 0.18225428841503905, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:33<03:14, 3.75s/it] 90%|█████████ | 469/520 [29:36<03:10, 3.73s/it] {'loss': 9.7546, 'grad_norm': 1.4724251950296069e-05, 'learning_rate': 0.17537048664503901, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:36<03:10, 3.73s/it] 90%|█████████ | 470/520 [29:40<03:06, 3.72s/it] {'loss': 9.1825, 'grad_norm': 1.1272139558805408e-05, 'learning_rate': 0.16861586048537175, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:40<03:06, 3.72s/it] 91%|█████████ | 471/520 [29:44<03:02, 3.72s/it] {'loss': 9.9318, 'grad_norm': 1.8945102749565823e-05, 'learning_rate': 0.16199067238120612, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:44<03:02, 3.72s/it] 91%|█████████ | 472/520 [29:48<02:58, 3.73s/it] {'loss': 9.1271, 'grad_norm': 1.0048558223074317e-05, 'learning_rate': 0.15549517974850724, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:48<02:58, 3.73s/it] 91%|█████████ | 473/520 [29:51<02:55, 3.72s/it] {'loss': 9.2163, 'grad_norm': 8.304712457208525e-06, 'learning_rate': 0.14912963496403675, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:51<02:55, 3.72s/it] 91%|█████████ | 474/520 [29:55<02:51, 3.72s/it] {'loss': 10.5051, 'grad_norm': 1.2235211943374257e-05, 'learning_rate': 0.14289428535554283, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:55<02:51, 3.72s/it] 91%|█████████▏| 475/520 [29:59<02:47, 3.72s/it] {'loss': 9.4607, 'grad_norm': 2.067713034783406e-05, 'learning_rate': 0.1367893731921518, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:59<02:47, 3.72s/it] 92%|█████████▏| 476/520 [30:02<02:43, 3.72s/it] {'loss': 9.401, 'grad_norm': 8.704937401261354e-06, 'learning_rate': 0.1308151356749579, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:02<02:43, 3.72s/it] 92%|█████████▏| 477/520 [30:06<02:40, 3.73s/it] {'loss': 9.0667, 'grad_norm': 7.63543606383216e-06, 'learning_rate': 0.12497180492780319, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:06<02:40, 3.73s/it] 92%|█████████▏| 478/520 [30:10<02:36, 3.73s/it] {'loss': 8.8972, 'grad_norm': 9.132981158875751e-06, 'learning_rate': 0.1192596079882613, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:10<02:36, 3.73s/it] 92%|█████████▏| 479/520 [30:14<02:32, 3.72s/it] {'loss': 10.3226, 'grad_norm': 1.0489007564123594e-05, 'learning_rate': 0.11367876679881361, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:14<02:32, 3.72s/it] 92%|█████████▏| 480/520 [30:17<02:29, 3.73s/it] {'loss': 10.2488, 'grad_norm': 1.1648528658072526e-05, 'learning_rate': 0.10822949819822753, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:17<02:29, 3.73s/it] 92%|█████████▎| 481/520 [30:21<02:26, 3.77s/it] {'loss': 10.0558, 'grad_norm': 1.1410698793453667e-05, 'learning_rate': 0.10291201391313165, 'epoch': 0.93} + 
92%|█████████▎| 481/520 [30:21<02:26, 3.77s/it] 93%|█████████▎| 482/520 [30:25<02:22, 3.76s/it] {'loss': 10.5568, 'grad_norm': 1.1100101963425234e-05, 'learning_rate': 0.09772652054978925, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:25<02:22, 3.76s/it] 93%|█████████▎| 483/520 [30:29<02:18, 3.76s/it] {'loss': 9.5733, 'grad_norm': 1.0134973486743016e-05, 'learning_rate': 0.09267321958606828, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:29<02:18, 3.76s/it] 93%|█████████▎| 484/520 [30:32<02:14, 3.74s/it] {'loss': 9.6134, 'grad_norm': 9.391479932311239e-06, 'learning_rate': 0.08775230736361733, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:32<02:14, 3.74s/it] 93%|█████████▎| 485/520 [30:36<02:10, 3.74s/it] {'loss': 9.0455, 'grad_norm': 8.491879517867807e-06, 'learning_rate': 0.08296397508023323, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:36<02:10, 3.74s/it] 93%|█████████▎| 486/520 [30:40<02:07, 3.74s/it] {'loss': 9.4867, 'grad_norm': 1.4669322964508396e-05, 'learning_rate': 0.07830840878243411, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:40<02:07, 3.74s/it] 94%|█████████▎| 487/520 [30:44<02:03, 3.74s/it] {'loss': 8.7106, 'grad_norm': 1.1401470758602656e-05, 'learning_rate': 0.07378578935823071, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:44<02:03, 3.74s/it] 94%|█████████▍| 488/520 [30:47<02:00, 3.76s/it] {'loss': 9.0055, 'grad_norm': 9.58721311703888e-06, 'learning_rate': 0.0693962925300966, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:47<02:00, 3.76s/it] 94%|█████████▍| 489/520 [30:51<01:57, 3.78s/it] {'loss': 9.9512, 'grad_norm': 1.2756710978237036e-05, 'learning_rate': 0.06514008884814321, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:51<01:57, 3.78s/it] 94%|█████████▍| 490/520 [30:55<01:54, 3.80s/it] {'loss': 9.1245, 'grad_norm': 8.01087032537789e-06, 'learning_rate': 0.06101734368349104, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:55<01:54, 3.80s/it] 94%|█████████▍| 491/520 [30:59<01:49, 3.79s/it] {'loss': 9.0733, 'grad_norm': 8.25564307511268e-06, 'learning_rate': 0.05702821722184537, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:59<01:49, 3.79s/it] 95%|█████████▍| 492/520 [31:03<01:45, 3.76s/it] {'loss': 9.3396, 'grad_norm': 9.41111358123465e-06, 'learning_rate': 0.05317286445727193, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:03<01:45, 3.76s/it] 95%|█████████▍| 493/520 [31:06<01:41, 3.75s/it] {'loss': 10.4644, 'grad_norm': 1.0726011327061952e-05, 'learning_rate': 0.0494514351861744, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:06<01:41, 3.75s/it] 95%|█████████▌| 494/520 [31:10<01:37, 3.74s/it] {'loss': 9.3939, 'grad_norm': 1.0081311946914864e-05, 'learning_rate': 0.045864074001476185, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:10<01:37, 3.74s/it] 95%|█████████▌| 495/520 [31:14<01:33, 3.72s/it] {'loss': 8.5938, 'grad_norm': 1.43663082823567e-05, 'learning_rate': 0.0424109202869985, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:14<01:33, 3.72s/it] 95%|█████████▌| 496/520 [31:17<01:29, 3.72s/it] {'loss': 8.8638, 'grad_norm': 1.1441529695310337e-05, 'learning_rate': 0.03909210821205017, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:17<01:29, 3.72s/it] 96%|█████████▌| 497/520 [31:21<01:25, 3.71s/it] {'loss': 9.8088, 'grad_norm': 1.4978863720465978e-05, 'learning_rate': 0.035907766726209045, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:21<01:25, 3.71s/it] 96%|█████████▌| 498/520 [31:25<01:21, 3.70s/it] {'loss': 8.9546, 'grad_norm': 1.096403297286955e-05, 'learning_rate': 0.032858019554315165, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:25<01:21, 3.70s/it] 96%|█████████▌| 499/520 
[31:29<01:17, 3.71s/it] {'loss': 10.4059, 'grad_norm': 1.17245494334652e-05, 'learning_rate': 0.029942985191663662, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:29<01:17, 3.71s/it] 96%|█████████▌| 500/520 [31:32<01:13, 3.69s/it] {'loss': 9.8358, 'grad_norm': 1.2810340864691434e-05, 'learning_rate': 0.027162776899397778, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:32<01:13, 3.69s/it] 96%|█████████▋| 501/520 [31:36<01:10, 3.71s/it] {'loss': 10.1184, 'grad_norm': 1.0475452820360503e-05, 'learning_rate': 0.024517502700111327, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:36<01:10, 3.71s/it] 97%|█████████▋| 502/520 [31:40<01:06, 3.71s/it] {'loss': 9.2519, 'grad_norm': 1.055953980667504e-05, 'learning_rate': 0.022007265373650886, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:40<01:06, 3.71s/it] 97%|█████████▋| 503/520 [31:43<01:02, 3.70s/it] {'loss': 10.0421, 'grad_norm': 1.1964826227997773e-05, 'learning_rate': 0.019632162453120827, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:43<01:02, 3.70s/it] 97%|█████████▋| 504/520 [31:47<01:00, 3.75s/it] {'loss': 9.5721, 'grad_norm': 1.2654563523805742e-05, 'learning_rate': 0.017392286221095066, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:47<01:00, 3.75s/it] 97%|█████████▋| 505/520 [31:51<00:56, 3.76s/it] {'loss': 9.4479, 'grad_norm': 9.857663493439105e-06, 'learning_rate': 0.015287723706031653, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:51<00:56, 3.76s/it] 97%|█████████▋| 506/520 [31:55<00:52, 3.74s/it] {'loss': 8.9068, 'grad_norm': 8.777480983357151e-06, 'learning_rate': 0.013318556678890592, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:55<00:52, 3.74s/it] 98%|█████████▊| 507/520 [31:59<00:49, 3.78s/it] {'loss': 10.859, 'grad_norm': 1.5538915564086777e-05, 'learning_rate': 0.011484861649957212, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:59<00:49, 3.78s/it] 98%|█████████▊| 508/520 [32:02<00:44, 3.75s/it] {'loss': 9.6244, 'grad_norm': 1.2751756108041719e-05, 'learning_rate': 0.009786709865869547, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:02<00:44, 3.75s/it] 98%|█████████▊| 509/520 [32:06<00:41, 3.74s/it] {'loss': 9.0412, 'grad_norm': 1.1395189915911045e-05, 'learning_rate': 0.00822416730684894, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:06<00:41, 3.74s/it] 98%|█████████▊| 510/520 [32:10<00:37, 3.73s/it] {'loss': 9.0964, 'grad_norm': 7.984305241827757e-06, 'learning_rate': 0.006797294684138533, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:10<00:37, 3.73s/it] 98%|█████████▊| 511/520 [32:13<00:33, 3.71s/it] {'loss': 9.2178, 'grad_norm': 8.393383824214908e-06, 'learning_rate': 0.005506147437641884, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:13<00:33, 3.71s/it] 98%|█████████▊| 512/520 [32:17<00:29, 3.75s/it] {'loss': 8.7408, 'grad_norm': 1.2535673302236669e-05, 'learning_rate': 0.0043507757337717945, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:17<00:29, 3.75s/it] 99%|█████████▊| 513/520 [32:21<00:26, 3.82s/it] {'loss': 9.2977, 'grad_norm': 8.076790888607833e-06, 'learning_rate': 0.003331224463497706, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:21<00:26, 3.82s/it] 99%|█████████▉| 514/520 [32:25<00:22, 3.82s/it] {'loss': 9.308, 'grad_norm': 9.802954853637299e-06, 'learning_rate': 0.002447533240604871, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:25<00:22, 3.82s/it] 99%|█████████▉| 515/520 [32:29<00:19, 3.83s/it] {'loss': 9.699, 'grad_norm': 1.3048810146765658e-05, 'learning_rate': 0.0016997364001532511, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:29<00:19, 3.83s/it] 99%|█████████▉| 516/520 [32:33<00:15, 3.80s/it] {'loss': 9.2662, 'grad_norm': 
8.149410134180254e-06, 'learning_rate': 0.0010878629971431408, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:33<00:15, 3.80s/it] 99%|█████████▉| 517/520 [32:36<00:11, 3.77s/it] {'loss': 10.3063, 'grad_norm': 9.364433591055811e-06, 'learning_rate': 0.000611936805387514, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:36<00:11, 3.77s/it] 100%|█████████▉| 518/520 [32:40<00:07, 3.73s/it] {'loss': 9.2228, 'grad_norm': 8.240625913029291e-06, 'learning_rate': 0.00027197631658798516, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:40<00:07, 3.73s/it] 100%|█████████▉| 519/520 [32:43<00:03, 3.69s/it] {'loss': 10.0716, 'grad_norm': 1.1916917222437734e-05, 'learning_rate': 6.799473961632829e-05, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:43<00:03, 3.69s/it] 100%|██████████| 520/520 [32:48<00:00, 3.95s/it] {'loss': 10.375, 'grad_norm': 1.6866085013355633e-05, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:48<00:00, 3.95s/it] {'train_runtime': 1968.5177, 'train_samples_per_second': 33.796, 'train_steps_per_second': 0.264, 'train_loss': 9.467700474308087, 'epoch': 1.0} + 100%|██████████| 520/520 [32:48<00:00, 3.95s/it] 100%|██████████| 520/520 [32:48<00:00, 3.79s/it] +[2025-10-10 08:41:03,328] [INFO] [launch.py:348:main] Process 1897682 exits successfully. +[2025-10-10 08:41:03,329] [INFO] [launch.py:348:main] Process 1897681 exits successfully. +[2025-10-10 08:41:03,329] [INFO] [launch.py:348:main] Process 1897687 exits successfully. +[2025-10-10 08:41:04,331] [INFO] [launch.py:348:main] Process 1897684 exits successfully. +[2025-10-10 08:41:04,332] [INFO] [launch.py:348:main] Process 1897686 exits successfully. +[2025-10-10 08:41:04,332] [INFO] [launch.py:348:main] Process 1897685 exits successfully. +[2025-10-10 08:41:04,332] [INFO] [launch.py:348:main] Process 1897683 exits successfully. +[2025-10-10 08:41:08,337] [INFO] [launch.py:348:main] Process 1897680 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251010_080549.log +Timestamp: 2025-10-10 08:41:10 +===================================== diff --git a/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251010_084110.log b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251010_084110.log new file mode 100644 index 0000000000000000000000000000000000000000..7b163153a606618a7bb9bc2f7f104a38ae8dc226 --- /dev/null +++ b/logs_oct10/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251010_084110.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251010_084110.log +Timestamp: 2025-10-10 08:41:10 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
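For reference, the learning-rate trail in the run that completed above is consistent with the Trainer's cosine schedule: `--warmup_ratio 0.03` over 520 optimizer steps gives roughly 16 warmup steps, after which the logged values decay to 0.0 at step 520/520. The sketch below is not part of the logs; it assumes a transformers-style cosine decay and a peak learning rate of 7, as suggested by the `_7` experiment name in the completion banner, and it reproduces the logged values to within rounding.

```python
import math

# Minimal sketch (not from the logs): reproduce the cosine learning-rate
# schedule the logged values above appear to follow. Assumptions: peak_lr=7
# (from the "_7" experiment banner), total_steps=520, and
# warmup = ceil(0.03 * 520) = 16 steps, matching --warmup_ratio 0.03.
PEAK_LR = 7.0
TOTAL_STEPS = 520
WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)  # 16

def cosine_lr(step: int) -> float:
    """Linear warmup to PEAK_LR, then cosine decay to 0 at TOTAL_STEPS."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / max(1, WARMUP_STEPS)
    progress = (step - WARMUP_STEPS) / max(1, TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

# Spot-check against the log: step 401 logs lr ~0.9195, step 520 logs 0.0.
print(cosine_lr(401))  # ~0.92
print(cosine_lr(520))  # 0.0
```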
+ import pynvml # type: ignore[import] +[2025-10-10 08:41:13,568] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:16,475] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-10 08:41:16,477] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json --image_folder /s3-code/ywang29/datasets/tinyllava --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 9 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 9 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-10 08:41:19,115] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:20,201] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-10 08:41:20,201] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-10 08:41:20,201] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-10 08:41:20,201] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-10 08:41:20,201] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-10 08:41:20,201] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-10 08:41:20,201] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-10 08:41:20,204] [INFO] [launch.py:253:main] process 1920187 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:41:20,206] [INFO] [launch.py:253:main] 
process 1920188 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:41:20,208] [INFO] [launch.py:253:main] process 1920189 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', 
'--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:41:20,210] [INFO] [launch.py:253:main] process 1920190 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', 
'--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:41:20,212] [INFO] [launch.py:253:main] process 1920191 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 
08:41:20,214] [INFO] [launch.py:253:main] process 1920192 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:41:20,216] [INFO] [launch.py:253:main] process 1920193 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', 
'--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-10 08:41:20,218] [INFO] [launch.py:253:main] process 1920194 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/s3-code/ywang29/datasets/tinyllava/text_files/llava_v1_5_mix665k.json', '--image_folder', '/s3-code/ywang29/datasets/tinyllava', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 
'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-10 08:41:27,030] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:27,032] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:27,039] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:27,084] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:27,098] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:27,099] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:27,100] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:27,106] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-10 08:41:27,456] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:41:27,456] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-10 08:41:27,459] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:41:27,463] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:41:27,495] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:41:27,506] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:41:27,520] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:41:27,523] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-10 08:41:27,539] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. 
If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible.
+ warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1920187:1920187 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920187:1920187 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920187:1920187 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1920187:1920187 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1920187:1920187 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1920187:1920187 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1920188:1920188 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1920188:1920188 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920188:1920188 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920188:1920188 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1920188:1920188 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1920188:1920188 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
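[Editor's note] The Flash Attention 2 warning above is expected here: under DeepSpeed ZeRO-3 the weights are materialized on CPU first and moved to GPU by the engine, so the message repeats once per rank. When loading the same checkpoint by hand, the pattern the warning asks for looks roughly like the minimal sketch below, assuming the standard transformers AutoModel API (the model name is taken from this run; everything else is illustrative):

import torch
from transformers import AutoModelForCausalLM

# Initialize on CPU with Flash Attention 2 enabled (FA2 requires
# fp16/bf16 weights), then move to GPU afterwards, as the warning
# above recommends.
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
model = model.to("cuda")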
+ywang29-vrdb-test1-worker-0:1920193:1920193 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1920193:1920193 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920193:1920193 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920193:1920193 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1920193:1920193 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1920193:1920193 [6] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1920189:1920189 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1920189:1920189 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920189:1920189 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920189:1920189 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1920189:1920189 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1920189:1920189 [2] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1920190:1920190 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1920190:1920190 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920190:1920190 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920190:1920190 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1920190:1920190 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1920190:1920190 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO NET/IB : No device found. 
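[Editor's note] The bootstrap lines above show each rank probing for InfiniBand (`NET/IB : No device found.`) and falling back to TCP sockets on eth0, the interface pinned by `NCCL_SOCKET_IFNAME`. A minimal sketch of how such a setup is typically expressed in the training process; values mirror this log, and the deepspeed launcher normally exports the rendezvous variables itself:

import os
import torch.distributed as dist

# Pin NCCL to the Ethernet interface and enable the INFO-level
# logging seen in this file; adjust per cluster.
os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")
os.environ.setdefault("NCCL_DEBUG", "INFO")

# Rendezvous via env:// -- MASTER_ADDR, MASTER_PORT, RANK and
# WORLD_SIZE are expected to be set by the launcher.
dist.init_process_group(backend="nccl")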
+ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1920194:1920194 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1920194:1920194 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920194:1920194 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920194:1920194 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1920194:1920194 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1920194:1920194 [7] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:1920192:1920192 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1920192:1920192 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920192:1920192 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920192:1920192 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1920192:1920192 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1920192:1920192 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1920191:1920191 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1920191:1920191 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920191:1920191 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920191:1920191 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1920191:1920191 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1920191:1920191 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO ncclCommInitRank comm 0x560788399a00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x4902381c06e11a95 - Init START +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO ncclCommInitRank comm 0x55686d200dd0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x4902381c06e11a95 - Init START +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO ncclCommInitRank comm 0x56348aff0110 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x4902381c06e11a95 - Init START +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO ncclCommInitRank comm 0x561a00a706e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x4902381c06e11a95 - Init START +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO ncclCommInitRank comm 0x563a99b45380 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x4902381c06e11a95 - Init START +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO ncclCommInitRank comm 0x55ea3abae930 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x4902381c06e11a95 - Init START +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO ncclCommInitRank comm 0x55a955f120b0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x4902381c06e11a95 - Init START +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO ncclCommInitRank comm 0x561d110f7560 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x4902381c06e11a95 - Init START +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1920189:1921805 
[2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO comm 0x55ea3abae930 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO comm 0x55a955f120b0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO comm 0x561d110f7560 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO comm 0x56348aff0110 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO comm 0x55686d200dd0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO comm 0x561a00a706e0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO comm 0x563a99b45380 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO comm 0x560788399a00 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 
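[Editor's note] The `Channel NN/24 : 0 1 2 3 4 5 6 7` lines enumerate 24 ring channels over the 8 local GPUs, and the `Trees` lines describe the matching chain trees (rank k receives from k-1 and feeds k+1, with -1 marking an endpoint). A toy sketch of the ring neighbor relation these lines encode; `ring_neighbors` is a hypothetical helper, purely illustrative:

def ring_neighbors(rank: int, world_size: int = 8) -> tuple[int, int]:
    # In the ring "0 1 2 3 4 5 6 7" each rank receives from its
    # predecessor and sends to its successor, modulo world_size.
    return (rank - 1) % world_size, (rank + 1) % world_size

for r in range(8):
    recv_from, send_to = ring_neighbors(r)
    print(f"rank {r}: recv from {recv_from}, send to {send_to}")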
+ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 
5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL 
INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL 
INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL 
INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
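[Editor's note] `Connected all rings` means the forward ring connections are established and collectives can flow; the lines that follow wire up the reverse (tree) direction. A minimal sanity check that could be run on each rank at this point, assuming the process group from the earlier sketch is initialized and one GPU per process on a single node:

import torch
import torch.distributed as dist

rank = dist.get_rank()
world_size = dist.get_world_size()

# Each rank contributes its own index; a SUM all-reduce should yield
# the same total on every GPU (0+1+...+7 = 28 for the 8 ranks here).
t = torch.tensor([float(rank)], device=torch.device("cuda", rank))
dist.all_reduce(t, op=dist.ReduceOp.SUM)
assert t.item() == world_size * (world_size - 1) / 2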
+ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL 
INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL 
INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 
p2p channels per peer +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1920191:1921826 [4] NCCL INFO ncclCommInitRank comm 0x560788399a00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x4902381c06e11a95 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1920194:1921807 [7] NCCL INFO ncclCommInitRank comm 0x55686d200dd0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x4902381c06e11a95 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1920187:1921786 [0] NCCL INFO ncclCommInitRank comm 0x56348aff0110 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x4902381c06e11a95 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1920190:1921806 [3] NCCL INFO ncclCommInitRank comm 0x55ea3abae930 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x4902381c06e11a95 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1920192:1921825 [5] NCCL INFO ncclCommInitRank comm 0x563a99b45380 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x4902381c06e11a95 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1920188:1921803 [1] NCCL INFO ncclCommInitRank comm 0x561d110f7560 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x4902381c06e11a95 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1920193:1921804 [6] NCCL INFO ncclCommInitRank comm 0x561a00a706e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x4902381c06e11a95 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1920189:1921805 [2] NCCL INFO ncclCommInitRank comm 0x55a955f120b0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x4902381c06e11a95 - Init COMPLETE +[2025-10-10 08:42:08,796] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 
'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 
'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 
'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 
'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 
'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 
'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 
'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
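The repeated `.scores` entries above are one learnable tensor per projection weight, layered on top of the pretrained checkpoint; since no saved checkpoint contains them, every one is flagged as newly initialized. A minimal sketch of the pattern suggested by the SupermaskLinearSparsity_SoftForward_Normal modules printed further down in this log (hypothetical code, assuming the usual supermask recipe of sigmoid-squashed scores gating a frozen weight; the repo's actual class may differ):

```python
# Hypothetical sketch, not the repo's actual implementation: a linear layer
# that gains a learnable per-weight `scores` tensor used as a soft mask.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    def __init__(self, in_features, out_features, bias=True, temperature=1.0):
        super().__init__(in_features, out_features, bias=bias)
        # One score per weight entry. Pretrained checkpoints carry no such
        # tensor, which is exactly what triggers the warning above.
        self.scores = nn.Parameter(torch.ones_like(self.weight))
        self.temperature = temperature
        # Only `.scores` tensors appear in the trainable-parameter listing
        # later in this log, so the base weights are presumably frozen.
        self.weight.requires_grad_(False)
        if self.bias is not None:
            self.bias.requires_grad_(False)

    def forward(self, x):
        # "Soft forward": squash scores into (0, 1) and gate each weight.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

Under this reading, the warning is expected and benign: the mask scores are meant to be trained from scratch during this masktune run.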
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-10 08:42:10,654] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training 
init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-10 08:42:28,906 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-10 08:42:28,911 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 
4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters 
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters 
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:1920192:1926853 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1920193:1926851 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 
7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920192:1926853 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920193:1926851 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920190:1926850 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920194:1926848 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1920190:1926850 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920194:1926848 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920189:1926849 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1920191:1926852 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920189:1926849 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920191:1926852 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920188:1926854 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 
[7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920188:1926854 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1920192:1926853 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1926851 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920191:1926852 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920190:1926850 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920192:1926853 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920193:1926851 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920189:1926849 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1920194:1926848 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+[~500 near-identical NCCL INFO lines condensed: each of the 8 local ranks (pids 1920187-1920194) connects channels 00/0 through 23/0 to its ring neighbors in both directions (0[0] -> 1[1] -> ... -> 7[7] -> 0[0], and 7[7] -> 6[6] -> ... -> 0[0]) via P2P/CUMEM/read]
+ywang29-vrdb-test1-worker-0: all 8 ranks report: NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0: all 8 ranks report: NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0: all 8 ranks report: NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0: all 8 ranks report: NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
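The trace above is NCCL's initialization debug output. A minimal sketch of how such logging is typically enabled (assuming the standard NCCL_DEBUG environment variable, set before the process group is created; the init call is illustrative, not from this run):

import os
# Standard NCCL debug switch; produces INFO lines like the ones above.
os.environ["NCCL_DEBUG"] = "INFO"

import torch.distributed as dist
# Hypothetical init for illustration; requires the usual RANK/WORLD_SIZE/
# MASTER_ADDR environment to be set by the launcher.
dist.init_process_group(backend="nccl")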
+ywang29-vrdb-test1-worker-0:1920193:1926851 [6] NCCL INFO ncclCommInitRank comm 0x7f10dc06a7e0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xe9ac2da54c95d2df - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1920191:1926852 [4] NCCL INFO ncclCommInitRank comm 0x7f82d006a130 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xe9ac2da54c95d2df - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1920189:1926849 [2] NCCL INFO ncclCommInitRank comm 0x7fb2bc06a970 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xe9ac2da54c95d2df - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1920187:1926847 [0] NCCL INFO ncclCommInitRank comm 0x7fe18006af90 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xe9ac2da54c95d2df - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1920192:1926853 [5] NCCL INFO ncclCommInitRank comm 0x7fae1406a9c0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xe9ac2da54c95d2df - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1920194:1926848 [7] NCCL INFO ncclCommInitRank comm 0x7fdcf006a150 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xe9ac2da54c95d2df - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1920188:1926854 [1] NCCL INFO ncclCommInitRank comm 0x7fd7f006a8a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xe9ac2da54c95d2df - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1920190:1926850 [3] NCCL INFO ncclCommInitRank comm 0x7fa84406a500 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xe9ac2da54c95d2df - Init COMPLETE
+ 1/520 [00:13<1:59:38, 13.83s/it] {'loss': 2.0453, 'grad_norm': 0.0048344756228119925, 'learning_rate': 0.5625, 'epoch': 0.0}
+ 2/520 [00:17<1:07:46, 7.85s/it] {'loss': 2.0549, 'grad_norm': 0.0052492953547402426, 'learning_rate': 1.125, 'epoch': 0.0}
+ 3/520 [00:21<51:20, 5.96s/it] {'loss': 1.717, 'grad_norm': 0.003258468645129139, 'learning_rate': 1.6875, 'epoch': 0.01}
+ 4/520 [00:24<43:25, 5.05s/it] {'loss': 2.5724, 'grad_norm': 0.02514430857595334, 'learning_rate': 2.25, 'epoch': 0.01}
+ 5/520 [00:28<38:57, 4.54s/it] {'loss': 9.4485, 'grad_norm': 0.8061943895484961, 'learning_rate': 2.8125, 'epoch': 0.01}
+ 6/520 [00:32<36:19, 4.24s/it] {'loss': 13.9854, 'grad_norm': 0.4158375681564969, 'learning_rate': 3.375, 'epoch': 0.01}
+ 7/520 [00:35<34:44, 4.06s/it] {'loss': 21.4752, 'grad_norm': 1.6192806178481234, 'learning_rate': 3.9375, 'epoch': 0.01}
+ 8/520 [00:40<35:07, 4.12s/it] {'loss': 16.7481, 'grad_norm': 0.03378776734654979, 'learning_rate': 4.5, 'epoch': 0.02}
+ 9/520 [00:43<34:06, 4.00s/it] {'loss': 16.3414, 'grad_norm': 0.02118042053215167, 'learning_rate': 5.0625, 'epoch': 0.02}
+ 10/520 [00:47<33:33, 3.95s/it] {'loss': 16.1437, 'grad_norm': 0.007404340935325572, 'learning_rate': 5.625, 'epoch': 0.02}
+ 11/520 [00:51<33:31, 3.95s/it] {'loss': 15.8041, 'grad_norm': 0.007483212147423048, 'learning_rate': 6.1875, 'epoch': 0.02}
+ 12/520 [00:55<33:20, 3.94s/it] {'loss': 15.6313, 'grad_norm': 0.004748137401485563, 'learning_rate': 6.75, 'epoch': 0.02}
+[2025-10-10 08:43:33,380] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
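The stage3.py warning above suggests a concrete mitigation. A minimal sketch of what that could look like, assuming a DeepSpeed engine and a flush at the same step count on every rank (the engine/loader names and the flush cadence are illustrative, not from this run):

from deepspeed.accelerator import get_accelerator

def train_loop(engine, loader, flush_every=100):
    # engine: a deepspeed.DeepSpeedEngine; loader: any iterable of batches.
    for step, batch in enumerate(loader):
        loss = engine(batch)      # assumes the wrapped model returns a loss
        engine.backward(loss)     # ZeRO-aware backward
        engine.step()             # optimizer step (where the warning is emitted)
        if step % flush_every == 0:
            # Same condition on every rank, so all ranks flush their allocator
            # caches at the same time, as the warning recommends.
            get_accelerator().empty_cache()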
+ 13/520 [01:00<34:50, 4.12s/it] {'loss': 15.4996, 'grad_norm': 0.004277414245041221, 'learning_rate': 7.3125, 'epoch': 0.03}
+ 14/520 [01:03<34:05, 4.04s/it] {'loss': 15.29, 'grad_norm': 0.0011949921506710982, 'learning_rate': 7.875, 'epoch': 0.03}
+ 15/520 [01:07<33:36, 3.99s/it] {'loss': 14.848, 'grad_norm': 0.0033943898891935324, 'learning_rate': 8.4375, 'epoch': 0.03}
+ 16/520 [01:11<33:16, 3.96s/it] {'loss': 13.4781, 'grad_norm': 0.020828324966039227, 'learning_rate': 9.0, 'epoch': 0.03}
+ 17/520 [01:15<32:49, 3.91s/it] {'loss': 20.44, 'grad_norm': 0.16133197195448842, 'learning_rate': 8.999912578191921, 'epoch': 0.03}
+ 18/520 [01:19<32:12, 3.85s/it] {'loss': 15.8299, 'grad_norm': 0.0009645702718575879, 'learning_rate': 8.999650316164386, 'epoch': 0.03}
+ 19/520 [01:22<31:47, 3.81s/it] {'loss': 15.7269, 'grad_norm': 0.0008202848830947141, 'learning_rate': 8.99921322410736, 'epoch': 0.04}
+ 20/520 [01:26<31:28, 3.78s/it] {'loss': 15.6971, 'grad_norm': 0.0011778973938298004, 'learning_rate': 8.998601319003672, 'epoch': 0.04}
+ 21/520 [01:30<31:11, 3.75s/it] {'loss': 15.4885, 'grad_norm': 0.0016024787425176767, 'learning_rate': 8.997814624628374, 'epoch': 0.04}
+ 22/520 [01:33<30:57, 3.73s/it] {'loss': 15.4554, 'grad_norm': 0.0011229328394896953, 'learning_rate': 8.996853171547794, 'epoch': 0.04}
+ 23/520 [01:37<30:42, 3.71s/it] {'loss': 15.3076, 'grad_norm': 0.0007059947639776948, 'learning_rate': 8.995716997118361, 'epoch': 0.04}
+ 24/520 [01:41<30:34, 3.70s/it] {'loss': 15.3514, 'grad_norm': 0.0008139690397017172, 'learning_rate': 8.99440614548515, 'epoch': 0.05}
+ 25/520 [01:44<30:23, 3.68s/it] {'loss': 15.1438, 'grad_norm': 0.002243983018526894, 'learning_rate': 8.992920667580174, 'epoch': 0.05}
+ 26/520 [01:48<30:16, 3.68s/it] {'loss': 15.2056, 'grad_norm': 0.00043525187947098394, 'learning_rate': 8.991260621120393, 'epoch': 0.05}
+ 27/520 [01:52<30:09, 3.67s/it] {'loss': 15.1008, 'grad_norm': 0.0005775078298703864, 'learning_rate': 8.98942607060548, 'epoch': 0.05}
+ 28/520 [01:55<30:00, 3.66s/it] {'loss': 14.9924, 'grad_norm': 0.0006871658175602714, 'learning_rate': 8.98741708731531, 'epoch': 0.05}
+ 29/520 [01:59<30:04, 3.68s/it] {'loss': 15.0557, 'grad_norm': 0.00041865933994745294, 'learning_rate': 8.985233749307199, 'epoch': 0.06}
+ 30/520 [02:03<30:02, 3.68s/it] {'loss': 15.1926, 'grad_norm': 0.00024041338156160626, 'learning_rate': 8.982876141412856, 'epoch': 0.06}
+ 31/520 [02:06<29:54, 3.67s/it] {'loss': 15.1235, 'grad_norm': 0.0003107062028717986, 'learning_rate': 8.980344355235102, 'epoch': 0.06}
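The learning_rate column rises by 0.5625 per step to a peak of 9.0 at step 16, then decays; the logged values are consistent with a linear-warmup-plus-cosine schedule over the 520 total steps. A small sketch that reproduces them (the function name is illustrative; checked against the logged values, e.g. step 17 -> 8.999912578..., step 100 -> 8.397114317...):

import math

def logged_lr(step, peak=9.0, warmup_steps=16, total_steps=520):
    if step <= warmup_steps:
        # Linear warmup: peak / warmup_steps = 0.5625 per step.
        return peak * step / warmup_steps
    # Cosine decay over the remaining total_steps - warmup_steps = 504 steps.
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))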
+ 32/520 [02:10<29:54, 3.68s/it] {'loss': 15.135, 'grad_norm': 0.00022211173785764428, 'learning_rate': 8.977638489144306, 'epoch': 0.06}
+ 33/520 [02:14<29:49, 3.68s/it] {'loss': 14.9994, 'grad_norm': 0.0002908106476544774, 'learning_rate': 8.974758648274559, 'epoch': 0.06}
+ 34/520 [02:18<29:47, 3.68s/it] {'loss': 14.8904, 'grad_norm': 0.0003489906374394602, 'learning_rate': 8.971704944519592, 'epoch': 0.07}
+ 35/520 [02:21<29:37, 3.66s/it] {'loss': 14.9223, 'grad_norm': 0.00030361089245945145, 'learning_rate': 8.968477496528427, 'epoch': 0.07}
+ 36/520 [02:25<29:33, 3.66s/it] {'loss': 15.0884, 'grad_norm': 0.0001820720552000969, 'learning_rate': 8.965076429700774, 'epoch': 0.07}
+ 37/520 [02:28<29:23, 3.65s/it] {'loss': 15.1524, 'grad_norm': 0.00016811115279179986, 'learning_rate': 8.961501876182147, 'epoch': 0.07}
+ 38/520 [02:32<29:25, 3.66s/it] {'loss': 15.1325, 'grad_norm': 0.00015833226665966715, 'learning_rate': 8.957753974858736, 'epoch': 0.07}
+ 39/520 [02:36<29:24, 3.67s/it] {'loss': 14.868, 'grad_norm': 0.0001960587783201661, 'learning_rate': 8.953832871352017, 'epoch': 0.07}
+ 40/520 [02:39<29:15, 3.66s/it] {'loss': 14.9818, 'grad_norm': 0.0001659088326504537, 'learning_rate': 8.949738718013078, 'epoch': 0.08}
+ 41/520 [02:43<29:14, 3.66s/it] {'loss': 14.938, 'grad_norm': 0.00018619823172736245, 'learning_rate': 8.945471673916716, 'epoch': 0.08}
+ 42/520 [02:47<29:02, 3.65s/it] {'loss': 14.805, 'grad_norm': 0.00016498059334763002, 'learning_rate': 8.941031904855246, 'epoch': 0.08}
+ 43/520 [02:50<29:03, 3.66s/it] {'loss': 14.9035, 'grad_norm': 0.0001403661289293322, 'learning_rate': 8.93641958333206, 'epoch': 0.08}
+ 44/520 [02:54<29:05, 3.67s/it] {'loss': 15.113, 'grad_norm': 0.00013870019324622939, 'learning_rate': 8.931634888554937, 'epoch': 0.08}
+ 45/520 [02:58<29:02, 3.67s/it] {'loss': 14.9065, 'grad_norm': 0.0002221501698056592, 'learning_rate': 8.926678006429055, 'epoch': 0.09}
+ 46/520 [03:01<28:55, 3.66s/it] {'loss': 15.0505, 'grad_norm': 0.00014747753559101614, 'learning_rate': 8.921549129549797, 'epoch': 0.09}
+ 47/520 [03:05<28:50, 3.66s/it] {'loss': 14.9427, 'grad_norm': 0.00014321428194327614, 'learning_rate': 8.916248457195245, 'epoch': 0.09}
+ 48/520 [03:09<28:40, 3.64s/it] {'loss': 14.832, 'grad_norm': 0.00016665413538784383, 'learning_rate': 8.910776195318448, 'epoch': 0.09}
+ 49/520 [03:12<28:42, 3.66s/it] {'loss': 14.9589, 'grad_norm': 0.00015125189447852917, 'learning_rate': 8.905132556539417, 'epoch': 0.09}
+ 50/520 [03:16<28:46, 3.67s/it] {'loss': 14.9402, 'grad_norm': 0.00014204079140054787, 'learning_rate': 8.89931776013687, 'epoch': 0.1}
+ 51/520 [03:20<28:42, 3.67s/it] {'loss': 14.8461, 'grad_norm': 0.00013693575808544967, 'learning_rate': 8.8933320320397, 'epoch': 0.1}
+ 52/520 [03:23<28:41, 3.68s/it] {'loss': 14.8884, 'grad_norm': 0.00012156381915535606, 'learning_rate': 8.887175604818205, 'epoch': 0.1}
+ 53/520 [03:27<28:30, 3.66s/it] {'loss': 15.0422, 'grad_norm': 0.00010505810276807093, 'learning_rate': 8.880848717675054, 'epoch': 0.1}
+ 54/520 [03:31<28:32, 3.68s/it] {'loss': 14.8821, 'grad_norm': 9.55716786663105e-05, 'learning_rate': 8.874351616435986, 'epoch': 0.1}
+ 55/520 [03:34<28:24, 3.67s/it] {'loss': 14.727, 'grad_norm': 0.00011545785737574079, 'learning_rate': 8.86768455354026, 'epoch': 0.11}
+ 56/520 [03:38<28:17, 3.66s/it] {'loss': 14.9583, 'grad_norm': 9.83862525049757e-05, 'learning_rate': 8.860847788030851, 'epoch': 0.11}
+ 57/520 [03:42<28:26, 3.69s/it] {'loss': 14.8124, 'grad_norm': 8.347076401698107e-05, 'learning_rate': 8.853841585544384, 'epoch': 0.11}
+ 58/520 [03:45<28:23, 3.69s/it] {'loss': 15.0783, 'grad_norm': 9.716186854659631e-05, 'learning_rate': 8.846666218300808, 'epoch': 0.11}
+ 59/520 [03:49<28:27, 3.70s/it] {'loss': 14.987, 'grad_norm': 7.936960707890472e-05, 'learning_rate': 8.839321965092825, 'epoch': 0.11}
+ 60/520 [03:53<28:32, 3.72s/it] {'loss': 14.849, 'grad_norm': 8.423961480940435e-05, 'learning_rate': 8.831809111275053, 'epoch': 0.12}
+ 61/520 [03:57<28:21, 3.71s/it] {'loss': 15.1016, 'grad_norm': 6.668244747609044e-05, 'learning_rate': 8.824127948752949, 'epoch': 0.12}
+ 62/520 [04:00<28:11, 3.69s/it] {'loss': 14.8778, 'grad_norm': 9.675899952643017e-05, 'learning_rate': 8.816278775971444, 'epoch': 0.12}
+ 63/520 [04:04<28:24, 3.73s/it] {'loss': 14.9722, 'grad_norm': 0.00012615313526084603, 'learning_rate': 8.808261897903382, 'epoch': 0.12}
+ 64/520 [04:08<28:34, 3.76s/it] {'loss': 14.9873, 'grad_norm': 0.00011166630789053549, 'learning_rate': 8.800077626037634, 'epoch': 0.12}
+ 65/520 [04:12<28:41, 3.78s/it] {'loss': 14.8011, 'grad_norm': 0.0001032216050421788, 'learning_rate': 8.79172627836702, 'epoch': 0.12}
+ 66/520 [04:16<28:48, 3.81s/it] {'loss': 14.9038, 'grad_norm': 0.00011637989860118384, 'learning_rate': 8.78320817937595, 'epoch': 0.13}
+ 67/520 [04:20<28:48, 3.82s/it] {'loss': 14.7324, 'grad_norm': 0.00011803804356561375, 'learning_rate': 8.774523660027807, 'epoch': 0.13}
+ 68/520 [04:23<28:55, 3.84s/it] {'loss': 14.9438, 'grad_norm': 0.00016550778721843466, 'learning_rate': 8.765673057752092, 'epoch': 0.13}
+ 69/520 [04:27<28:52, 3.84s/it] {'loss': 14.7683, 'grad_norm': 7.527117887091193e-05, 'learning_rate': 8.756656716431321, 'epoch': 0.13}
+ 70/520 [04:31<28:53, 3.85s/it] {'loss': 14.8371, 'grad_norm': 8.605378852324911e-05, 'learning_rate': 8.747474986387655, 'epoch': 0.13}
+ 71/520 [04:35<28:37, 3.83s/it] {'loss': 14.8825, 'grad_norm': 9.115235893226866e-05, 'learning_rate': 8.738128224369285, 'epoch': 0.14}
+ 72/520 [04:39<28:32, 3.82s/it] {'loss': 14.9089, 'grad_norm': 7.353108497412024e-05, 'learning_rate': 8.728616793536588, 'epoch': 0.14}
+ 73/520 [04:43<28:37, 3.84s/it] {'loss': 14.7818, 'grad_norm': 0.00010342369004901919, 'learning_rate': 8.718941063447996, 'epoch': 0.14}
+ 74/520 [04:46<28:39, 3.85s/it] {'loss': 14.8289, 'grad_norm': 9.619779378612349e-05, 'learning_rate': 8.709101410045653, 'epoch': 0.14}
+ 75/520 [04:50<28:19, 3.82s/it] {'loss': 14.9177, 'grad_norm': 8.758976304396314e-05, 'learning_rate': 8.699098215640799, 'epoch': 0.14}
+ 76/520 [04:54<27:59, 3.78s/it] {'loss': 15.1015, 'grad_norm': 6.734563734823484e-05, 'learning_rate': 8.68893186889892, 'epoch': 0.15}
+ 77/520 [04:58<27:37, 3.74s/it] {'loss': 14.6062, 'grad_norm': 0.00010880153643688584, 'learning_rate': 8.67860276482464, 'epoch': 0.15}
+ 78/520 [05:01<27:30, 3.73s/it] {'loss': 14.8881, 'grad_norm': 8.052350393719017e-05, 'learning_rate': 8.66811130474639, 'epoch': 0.15}
+ 79/520 [05:05<27:20, 3.72s/it] {'loss': 14.92, 'grad_norm': 8.117802901496362e-05, 'learning_rate': 8.65745789630079, 'epoch': 0.15}
+ 80/520 [05:09<27:12, 3.71s/it] {'loss': 14.8907, 'grad_norm': 0.00019283475104362127, 'learning_rate': 8.646642953416833, 'epoch': 0.15}
+ 81/520 [05:12<27:07, 3.71s/it] {'loss': 14.7323, 'grad_norm': 0.00010528923677303773, 'learning_rate': 8.635666896299792, 'epoch': 0.16}
+ 82/520 [05:16<27:07, 3.72s/it] {'loss': 14.842, 'grad_norm': 0.00010036808051002741, 'learning_rate': 8.624530151414893, 'epoch': 0.16}
+ 83/520 [05:20<27:04, 3.72s/it] {'loss': 14.8294, 'grad_norm': 0.00026886528115959536, 'learning_rate': 8.613233151470745, 'epoch': 0.16}
+ 84/520 [05:24<26:59, 3.71s/it] {'loss': 14.8546, 'grad_norm': 6.14016915243113e-05, 'learning_rate': 8.60177633540253, 'epoch': 0.16}
+ 85/520 [05:27<26:53, 3.71s/it] {'loss': 14.9583, 'grad_norm': 5.9901646858148176e-05, 'learning_rate': 8.590160148354949, 'epoch': 0.16}
+ 86/520 [05:31<26:46, 3.70s/it] {'loss': 14.9943, 'grad_norm': 5.313899452173746e-05, 'learning_rate': 8.578385041664925, 'epoch': 0.17}
+ 87/520 [05:35<26:41, 3.70s/it] {'loss': 14.8546, 'grad_norm': 6.0422611143408834e-05, 'learning_rate': 8.566451472844065, 'epoch': 0.17}
+ 88/520 [05:38<26:34, 3.69s/it] {'loss': 15.1401, 'grad_norm': 6.237344647280985e-05, 'learning_rate': 8.554359905560887, 'epoch': 0.17}
+ 89/520 [05:42<26:34, 3.70s/it] {'loss': 14.7571, 'grad_norm': 6.848582793288756e-05, 'learning_rate': 8.542110809622798, 'epoch': 0.17}
+ 90/520 [05:46<26:32, 3.70s/it] {'loss': 14.763, 'grad_norm': 0.0001888455998276921, 'learning_rate': 8.529704660957854, 'epoch': 0.17}
+ 91/520 [05:49<26:37, 3.72s/it] {'loss': 14.9082, 'grad_norm': 5.469910510218036e-05, 'learning_rate': 8.517141941596252, 'epoch': 0.17}
+ 92/520 [05:53<26:43, 3.75s/it] {'loss': 14.865, 'grad_norm': 6.408807212252917e-05, 'learning_rate': 8.50442313965161, 'epoch': 0.18}
+ 93/520 [05:57<26:34, 3.73s/it] {'loss': 14.7315, 'grad_norm': 7.265451169494722e-05, 'learning_rate': 8.491548749301998, 'epoch': 0.18}
+ 94/520 [06:01<26:33, 3.74s/it] {'loss': 14.9755, 'grad_norm': 5.850513066963864e-05, 'learning_rate': 8.478519270770743, 'epoch': 0.18}
+ 95/520 [06:04<26:24, 3.73s/it] {'loss': 14.6844, 'grad_norm': 6.383894907947917e-05, 'learning_rate': 8.465335210306991, 'epoch': 0.18}
+ 96/520 [06:08<26:19, 3.73s/it] {'loss': 14.8793, 'grad_norm': 5.293436355189148e-05, 'learning_rate': 8.451997080166029, 'epoch': 0.18}
+ 97/520 [06:12<26:14, 3.72s/it] {'loss': 14.6706, 'grad_norm': 8.483068280023954e-05, 'learning_rate': 8.438505398589392, 'epoch': 0.19}
+ 98/520 [06:16<26:07, 3.72s/it] {'loss': 14.9743, 'grad_norm': 5.609918629058279e-05, 'learning_rate': 8.424860689784724, 'epoch': 0.19}
+ 99/520 [06:19<26:02, 3.71s/it] {'loss': 14.7073, 'grad_norm': 6.501534124468104e-05, 'learning_rate': 8.411063483905409, 'epoch': 0.19}
+ 100/520 [06:23<25:59, 3.71s/it] {'loss': 14.878, 'grad_norm': 5.401024813597546e-05, 'learning_rate': 8.397114317029974, 'epoch': 0.19}
+ 101/520 [06:27<25:52, 3.70s/it] {'loss': 14.8922, 'grad_norm': 5.552124915026075e-05, 'learning_rate': 8.383013731141258, 'epoch': 0.19}
+ 102/520 [06:30<25:46, 3.70s/it] {'loss': 14.6802, 'grad_norm': 6.912363574512485e-05, 'learning_rate': 8.368762274105356, 'epoch': 0.2}
+ 103/520 [06:34<25:50, 3.72s/it] {'loss': 14.9354, 'grad_norm': 5.7696884684449814e-05, 'learning_rate': 8.354360499650332, 'epoch': 0.2}
+ 104/520 [06:38<25:43, 3.71s/it] {'loss': 14.7994, 'grad_norm': 5.390684029776434e-05, 'learning_rate': 8.3398089673447, 'epoch': 0.2}
+ 105/520 [06:42<25:39, 3.71s/it] {'loss': 14.8642, 'grad_norm': 6.534722736968578e-05, 'learning_rate': 8.325108242575691, 'epoch': 0.2}
+ 106/520 [06:45<25:30, 3.70s/it] {'loss': 15.0543, 'grad_norm': 5.199322087634548e-05, 'learning_rate': 8.310258896527278, 'epoch': 0.2}
+ 107/520 [06:49<25:27, 3.70s/it] {'loss': 15.0228, 'grad_norm': 5.996541946226357e-05, 'learning_rate': 8.295261506157985, 'epoch': 0.21}
+ 108/520 [06:53<25:20, 3.69s/it] {'loss': 14.7701, 'grad_norm': 6.295239998333765e-05, 'learning_rate': 8.280116654178473, 'epoch': 0.21}
+ 109/520 [06:56<25:20, 3.70s/it] {'loss': 14.8195, 'grad_norm': 4.651322321511989e-05, 'learning_rate': 8.264824929028888, 'epoch': 0.21}
+ 110/520 [07:00<25:22, 3.71s/it] {'loss': 14.9587, 'grad_norm': 4.0833239410463724e-05, 'learning_rate': 8.24938692485602, 'epoch': 0.21}
+ 111/520 [07:04<25:24, 3.73s/it] {'loss': 14.8997, 'grad_norm': 4.186686033104541e-05, 'learning_rate': 8.233803241490193, 'epoch': 0.21}
+ 112/520 [07:07<25:16, 3.72s/it] {'loss': 14.9445, 'grad_norm': 4.829622380961291e-05, 'learning_rate': 8.218074484421978, 'epoch': 0.22}
+ 113/520 [07:11<25:04, 3.70s/it] {'loss': 14.8478, 'grad_norm': 4.567475127172735e-05, 'learning_rate': 8.20220126477865, 'epoch': 0.22}
+ 114/520 [07:15<25:00, 3.70s/it] {'loss': 14.8743, 'grad_norm': 4.688142499692163e-05, 'learning_rate': 8.186184199300463, 'epoch': 0.22}
+ 115/520 [07:18<24:52, 3.68s/it] {'loss': 14.9013, 'grad_norm': 4.054374983610023e-05, 'learning_rate': 8.17002391031667, 'epoch': 0.22}
+ 116/520 [07:22<24:52, 3.69s/it] {'loss': 14.9563, 'grad_norm': 5.334872016817575e-05, 'learning_rate': 8.153721025721355, 'epoch': 0.22}
+ 117/520 [07:26<24:47, 3.69s/it] {'loss': 14.9433, 'grad_norm': 4.4059639005680536e-05, 'learning_rate': 8.137276178949024, 'epoch': 0.23}
+ 118/520 [07:30<24:43, 3.69s/it] {'loss': 14.9045, 'grad_norm': 4.1681503191785185e-05, 'learning_rate': 8.120690008950007, 'epoch': 0.23}
+ 119/520 [07:33<24:40, 3.69s/it] {'loss': 14.879, 'grad_norm': 5.131281531266399e-05, 'learning_rate': 8.103963160165627, 'epoch': 0.23}
+ 120/520 [07:37<24:31, 3.68s/it] {'loss': 14.6923, 'grad_norm': 7.84011398237191e-05, 'learning_rate': 8.08709628250315, 'epoch': 0.23}
+ 121/520 [07:41<24:26, 3.68s/it] {'loss': 14.8908, 'grad_norm': 5.071210037179686e-05, 'learning_rate': 8.070090031310558, 'epoch': 0.23}
+ 122/520 [07:44<24:23, 3.68s/it] {'loss': 14.8186, 'grad_norm': 4.314672213783568e-05, 'learning_rate': 8.05294506735106, 'epoch': 0.23}
+ 123/520 [07:48<24:18, 3.67s/it] {'loss': 15.1327, 'grad_norm': 3.934946015902519e-05, 'learning_rate': 8.035662056777431, 'epoch': 0.24}
+ 124/520 [07:52<24:15, 3.68s/it] {'loss': 14.7154, 'grad_norm': 4.344269475091321e-05, 'learning_rate': 8.018241671106134, 'epoch': 0.24}
+ 125/520 [07:55<24:18, 3.69s/it] {'loss': 14.7154, 'grad_norm': 5.223763229127096e-05, 'learning_rate': 8.000684587191216, 'epoch': 0.24}
+ 126/520 [08:00<25:37, 3.90s/it] {'loss': 14.9849, 'grad_norm': 4.1178806256876324e-05, 'learning_rate': 7.982991487198023, 'epoch': 0.24}
0.24} + 24%|██▍ | 126/520 [08:00<25:37, 3.90s/it] 24%|██▍ | 127/520 [08:04<25:19, 3.87s/it] {'loss': 14.6317, 'grad_norm': 5.364275221905431e-05, 'learning_rate': 7.9651630585766835, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:04<25:19, 3.87s/it] 25%|██▍ | 128/520 [08:07<24:57, 3.82s/it] {'loss': 14.7971, 'grad_norm': 4.34434015828538e-05, 'learning_rate': 7.947199994035401, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:07<24:57, 3.82s/it] 25%|██▍ | 129/520 [08:11<24:47, 3.80s/it] {'loss': 14.9202, 'grad_norm': 4.047170690083425e-05, 'learning_rate': 7.929102991513549, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:11<24:47, 3.80s/it] 25%|██▌ | 130/520 [08:15<24:31, 3.77s/it] {'loss': 14.8542, 'grad_norm': 4.518936786250301e-05, 'learning_rate': 7.910872754154538, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:15<24:31, 3.77s/it] 25%|██▌ | 131/520 [08:18<24:23, 3.76s/it] {'loss': 15.0695, 'grad_norm': 4.9427478385023516e-05, 'learning_rate': 7.892509990278509, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:18<24:23, 3.76s/it] 25%|██▌ | 132/520 [08:22<24:11, 3.74s/it] {'loss': 14.7247, 'grad_norm': 4.004751516074127e-05, 'learning_rate': 7.874015413354804, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:22<24:11, 3.74s/it] 26%|██▌ | 133/520 [08:26<23:56, 3.71s/it] {'loss': 14.6298, 'grad_norm': 5.707266396864948e-05, 'learning_rate': 7.8553897419742444, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:26<23:56, 3.71s/it] 26%|██▌ | 134/520 [08:29<23:51, 3.71s/it] {'loss': 14.8058, 'grad_norm': 4.067191385714482e-05, 'learning_rate': 7.83663369982122, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:29<23:51, 3.71s/it] 26%|██▌ | 135/520 [08:33<23:47, 3.71s/it] {'loss': 14.861, 'grad_norm': 4.3138726344042115e-05, 'learning_rate': 7.817748015645558, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:33<23:47, 3.71s/it] 26%|██▌ | 136/520 [08:37<23:39, 3.70s/it] {'loss': 14.8805, 'grad_norm': 6.987878545862609e-05, 'learning_rate': 7.798733423234218, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:37<23:39, 3.70s/it] 26%|██▋ | 137/520 [08:41<23:36, 3.70s/it] {'loss': 14.7418, 'grad_norm': 6.428000314613116e-05, 'learning_rate': 7.779590661382778, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:41<23:36, 3.70s/it] 27%|██▋ | 138/520 [08:44<23:27, 3.69s/it] {'loss': 14.8824, 'grad_norm': 6.133790785350406e-05, 'learning_rate': 7.760320473866727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:44<23:27, 3.69s/it] 27%|██▋ | 139/520 [08:48<23:19, 3.67s/it] {'loss': 14.8981, 'grad_norm': 3.85102346671351e-05, 'learning_rate': 7.74092360941257, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:48<23:19, 3.67s/it] 27%|██▋ | 140/520 [08:52<23:38, 3.73s/it] {'loss': 14.9594, 'grad_norm': 3.802621716734509e-05, 'learning_rate': 7.721400821668733, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:52<23:38, 3.73s/it] 27%|██▋ | 141/520 [08:56<23:48, 3.77s/it] {'loss': 14.9515, 'grad_norm': 4.085898153959385e-05, 'learning_rate': 7.7017528691762855, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:56<23:48, 3.77s/it] 27%|██▋ | 142/520 [08:59<23:57, 3.80s/it] {'loss': 15.0392, 'grad_norm': 4.4236681147042274e-05, 'learning_rate': 7.681980515339464, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:59<23:57, 3.80s/it] 28%|██▊ | 143/520 [09:03<23:59, 3.82s/it] {'loss': 14.8048, 'grad_norm': 8.413404543948615e-05, 'learning_rate': 7.6620845283960115, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:03<23:59, 3.82s/it] 28%|██▊ | 144/520 [09:07<24:00, 3.83s/it] {'loss': 14.7352, 'grad_norm': 8.866878985781952e-05, 'learning_rate': 7.642065681387328, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:07<24:00, 3.83s/it] 28%|██▊ | 145/520 [09:11<24:02, 3.85s/it] {'loss': 14.7082, 
'grad_norm': 5.143255238039525e-05, 'learning_rate': 7.621924752128438, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:11<24:02, 3.85s/it] 28%|██▊ | 146/520 [09:15<23:46, 3.82s/it] {'loss': 14.9487, 'grad_norm': 5.1333487400636735e-05, 'learning_rate': 7.601662523177762, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:15<23:46, 3.82s/it] 28%|██▊ | 147/520 [09:19<23:46, 3.82s/it] {'loss': 14.7563, 'grad_norm': 6.439755285115286e-05, 'learning_rate': 7.581279781806721, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:19<23:46, 3.82s/it] 28%|██▊ | 148/520 [09:22<23:45, 3.83s/it] {'loss': 14.9093, 'grad_norm': 5.909547146426493e-05, 'learning_rate': 7.560777319969136, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:22<23:45, 3.83s/it] 29%|██▊ | 149/520 [09:26<23:52, 3.86s/it] {'loss': 14.7299, 'grad_norm': 6.203230381951835e-05, 'learning_rate': 7.540155934270471, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:26<23:52, 3.86s/it] 29%|██▉ | 150/520 [09:30<23:45, 3.85s/it] {'loss': 14.9306, 'grad_norm': 3.3644084931777016e-05, 'learning_rate': 7.519416425936865, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:30<23:45, 3.85s/it] 29%|██▉ | 151/520 [09:34<23:46, 3.87s/it] {'loss': 14.7286, 'grad_norm': 5.598676106139984e-05, 'learning_rate': 7.498559600784017, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:34<23:46, 3.87s/it] 29%|██▉ | 152/520 [09:38<23:37, 3.85s/it] {'loss': 14.6851, 'grad_norm': 4.9191276330630656e-05, 'learning_rate': 7.477586269185868, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:38<23:37, 3.85s/it] 29%|██▉ | 153/520 [09:42<23:35, 3.86s/it] {'loss': 14.8497, 'grad_norm': 5.8743424947012206e-05, 'learning_rate': 7.456497246043113, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:42<23:35, 3.86s/it] 30%|██▉ | 154/520 [09:46<23:33, 3.86s/it] {'loss': 14.9162, 'grad_norm': 3.3635091476866276e-05, 'learning_rate': 7.435293350751545, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:46<23:33, 3.86s/it] 30%|██▉ | 155/520 [09:50<23:27, 3.85s/it] {'loss': 14.69, 'grad_norm': 7.233796244434934e-05, 'learning_rate': 7.413975407170216, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:50<23:27, 3.85s/it] 30%|███ | 156/520 [09:53<23:19, 3.84s/it] {'loss': 14.838, 'grad_norm': 8.307302449425403e-05, 'learning_rate': 7.392544243589427, 'epoch': 0.3} + 30%|███ | 156/520 [09:53<23:19, 3.84s/it] 30%|███ | 157/520 [09:57<22:56, 3.79s/it] {'loss': 14.8932, 'grad_norm': 4.680184601920555e-05, 'learning_rate': 7.371000692698539, 'epoch': 0.3} + 30%|███ | 157/520 [09:57<22:56, 3.79s/it] 30%|███ | 158/520 [10:01<22:34, 3.74s/it] {'loss': 14.8507, 'grad_norm': 4.366354726806829e-05, 'learning_rate': 7.34934559155363, 'epoch': 0.3} + 30%|███ | 158/520 [10:01<22:34, 3.74s/it] 31%|███ | 159/520 [10:04<22:27, 3.73s/it] {'loss': 14.8465, 'grad_norm': 6.234035760458593e-05, 'learning_rate': 7.327579781544963, 'epoch': 0.31} + 31%|███ | 159/520 [10:04<22:27, 3.73s/it] 31%|███ | 160/520 [10:08<22:11, 3.70s/it] {'loss': 14.7432, 'grad_norm': 5.007999486037999e-05, 'learning_rate': 7.305704108364301, 'epoch': 0.31} + 31%|███ | 160/520 [10:08<22:11, 3.70s/it] 31%|███ | 161/520 [10:12<22:02, 3.68s/it] {'loss': 14.8119, 'grad_norm': 6.309738944673659e-05, 'learning_rate': 7.283719421972047, 'epoch': 0.31} + 31%|███ | 161/520 [10:12<22:02, 3.68s/it] 31%|███ | 162/520 [10:15<21:58, 3.68s/it] {'loss': 14.8518, 'grad_norm': 5.006410902239435e-05, 'learning_rate': 7.261626576564214, 'epoch': 0.31} + 31%|███ | 162/520 [10:15<21:58, 3.68s/it] 31%|███▏ | 163/520 [10:19<21:47, 3.66s/it] {'loss': 14.6772, 'grad_norm': 0.00012718000576582948, 'learning_rate': 7.239426430539243, 'epoch': 0.31} + 31%|███▏ | 163/520 
[10:19<21:47, 3.66s/it] 32%|███▏ | 164/520 [10:23<21:43, 3.66s/it] {'loss': 14.6574, 'grad_norm': 8.348324328922529e-05, 'learning_rate': 7.217119846464648, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:23<21:43, 3.66s/it] 32%|███▏ | 165/520 [10:26<21:38, 3.66s/it] {'loss': 14.8112, 'grad_norm': 6.708355615340925e-05, 'learning_rate': 7.194707691043502, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:26<21:38, 3.66s/it] 32%|███▏ | 166/520 [10:30<21:36, 3.66s/it] {'loss': 14.8454, 'grad_norm': 6.152009186232856e-05, 'learning_rate': 7.172190835080757, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:30<21:36, 3.66s/it] 32%|███▏ | 167/520 [10:34<21:32, 3.66s/it] {'loss': 14.7559, 'grad_norm': 6.195512597793787e-05, 'learning_rate': 7.149570153449421, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:34<21:32, 3.66s/it] 32%|███▏ | 168/520 [10:37<21:32, 3.67s/it] {'loss': 14.8581, 'grad_norm': 7.296552260063755e-05, 'learning_rate': 7.126846525056555, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:37<21:32, 3.67s/it] 32%|███▎ | 169/520 [10:41<21:30, 3.68s/it] {'loss': 14.7557, 'grad_norm': 5.184943892166865e-05, 'learning_rate': 7.104020832809127, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:41<21:30, 3.68s/it] 33%|███▎ | 170/520 [10:45<21:28, 3.68s/it] {'loss': 14.9147, 'grad_norm': 6.580400120365249e-05, 'learning_rate': 7.081093963579708, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:45<21:28, 3.68s/it] 33%|███▎ | 171/520 [10:48<21:20, 3.67s/it] {'loss': 14.6171, 'grad_norm': 7.931217587969677e-05, 'learning_rate': 7.058066808172016, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:48<21:20, 3.67s/it] 33%|███▎ | 172/520 [10:52<21:16, 3.67s/it] {'loss': 14.8802, 'grad_norm': 4.4524580093940334e-05, 'learning_rate': 7.034940261286299, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:52<21:16, 3.67s/it] 33%|███▎ | 173/520 [10:56<21:12, 3.67s/it] {'loss': 14.7667, 'grad_norm': 6.617628908010602e-05, 'learning_rate': 7.011715221484579, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:56<21:12, 3.67s/it] 33%|███▎ | 174/520 [10:59<21:10, 3.67s/it] {'loss': 14.6889, 'grad_norm': 4.294386229359572e-05, 'learning_rate': 6.988392591155727, 'epoch': 0.33} + 33%|███▎ | 174/520 [10:59<21:10, 3.67s/it] 34%|███▎ | 175/520 [11:03<21:07, 3.67s/it] {'loss': 14.792, 'grad_norm': 6.415887251088329e-05, 'learning_rate': 6.964973276480421, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:03<21:07, 3.67s/it] 34%|███▍ | 176/520 [11:07<21:04, 3.67s/it] {'loss': 14.9418, 'grad_norm': 3.678194711292715e-05, 'learning_rate': 6.941458187395917, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:07<21:04, 3.67s/it] 34%|███▍ | 177/520 [11:10<20:59, 3.67s/it] {'loss': 14.7671, 'grad_norm': 3.645824876923716e-05, 'learning_rate': 6.917848237560708, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:10<20:59, 3.67s/it] 34%|███▍ | 178/520 [11:14<20:57, 3.68s/it] {'loss': 14.7615, 'grad_norm': 5.9520536980729806e-05, 'learning_rate': 6.894144344319015, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:14<20:57, 3.68s/it] 34%|███▍ | 179/520 [11:18<20:52, 3.67s/it] {'loss': 14.846, 'grad_norm': 5.803473939902275e-05, 'learning_rate': 6.870347428665153, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:18<20:52, 3.67s/it] 35%|███▍ | 180/520 [11:21<20:47, 3.67s/it] {'loss': 14.8816, 'grad_norm': 5.840011626489655e-05, 'learning_rate': 6.846458415207741, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:21<20:47, 3.67s/it] 35%|███▍ | 181/520 [11:25<20:45, 3.67s/it] {'loss': 14.919, 'grad_norm': 5.194895123560967e-05, 'learning_rate': 6.82247823213378, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:25<20:45, 3.67s/it] 35%|███▌ | 182/520 [11:29<20:38, 3.66s/it] {'loss': 
14.739, 'grad_norm': 4.912571924432501e-05, 'learning_rate': 6.798407811172586, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:29<20:38, 3.66s/it] 35%|███▌ | 183/520 [11:32<20:34, 3.66s/it] {'loss': 14.8429, 'grad_norm': 5.5844585876358796e-05, 'learning_rate': 6.774248087559589, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:32<20:34, 3.66s/it] 35%|███▌ | 184/520 [11:36<20:32, 3.67s/it] {'loss': 14.7355, 'grad_norm': 6.305529107722824e-05, 'learning_rate': 6.75, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:36<20:32, 3.67s/it] 36%|███▌ | 185/520 [11:40<20:31, 3.67s/it] {'loss': 14.9129, 'grad_norm': 4.3820793027230286e-05, 'learning_rate': 6.725664490632333, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:40<20:31, 3.67s/it] 36%|███▌ | 186/520 [11:43<20:32, 3.69s/it] {'loss': 14.819, 'grad_norm': 3.5681126347479536e-05, 'learning_rate': 6.701242504991802, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:43<20:32, 3.69s/it] 36%|███▌ | 187/520 [11:47<20:27, 3.69s/it] {'loss': 14.6731, 'grad_norm': 6.133018580576765e-05, 'learning_rate': 6.6767349919735794, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:47<20:27, 3.69s/it] 36%|███▌ | 188/520 [11:51<20:26, 3.70s/it] {'loss': 14.8081, 'grad_norm': 5.383998271918495e-05, 'learning_rate': 6.652142903795932, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:51<20:26, 3.70s/it] 36%|███▋ | 189/520 [11:55<20:26, 3.71s/it] {'loss': 14.93, 'grad_norm': 5.6239616948754563e-05, 'learning_rate': 6.627467195963222, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:55<20:26, 3.71s/it] 37%|███▋ | 190/520 [11:58<20:17, 3.69s/it] {'loss': 14.703, 'grad_norm': 5.2567731624469226e-05, 'learning_rate': 6.6027088272287795, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:58<20:17, 3.69s/it] 37%|███▋ | 191/520 [12:02<20:13, 3.69s/it] {'loss': 14.7236, 'grad_norm': 5.631211835868167e-05, 'learning_rate': 6.577868759557654, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:02<20:13, 3.69s/it] 37%|███▋ | 192/520 [12:06<20:12, 3.70s/it] {'loss': 14.7567, 'grad_norm': 9.642247143067083e-05, 'learning_rate': 6.552947958089233, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:06<20:12, 3.70s/it] 37%|███▋ | 193/520 [12:09<20:07, 3.69s/it] {'loss': 14.8176, 'grad_norm': 4.8300524869011496e-05, 'learning_rate': 6.5279473910997545, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:09<20:07, 3.69s/it] 37%|███▋ | 194/520 [12:13<20:02, 3.69s/it] {'loss': 14.9092, 'grad_norm': 5.268689106866895e-05, 'learning_rate': 6.502868029964665, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:13<20:02, 3.69s/it] 38%|███▊ | 195/520 [12:17<19:56, 3.68s/it] {'loss': 14.9463, 'grad_norm': 4.551249072732555e-05, 'learning_rate': 6.477710849120903, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:17<19:56, 3.68s/it] 38%|███▊ | 196/520 [12:20<19:54, 3.69s/it] {'loss': 14.7981, 'grad_norm': 5.285565460545329e-05, 'learning_rate': 6.452476826029011, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:20<19:54, 3.69s/it] 38%|███▊ | 197/520 [12:24<19:58, 3.71s/it] {'loss': 14.8099, 'grad_norm': 5.8474776261856386e-05, 'learning_rate': 6.427166941135182, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:24<19:58, 3.71s/it] 38%|███▊ | 198/520 [12:28<20:06, 3.75s/it] {'loss': 14.8439, 'grad_norm': 4.451644772969527e-05, 'learning_rate': 6.401782177833148, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:28<20:06, 3.75s/it] 38%|███▊ | 199/520 [12:32<20:15, 3.79s/it] {'loss': 14.6785, 'grad_norm': 5.751763677841358e-05, 'learning_rate': 6.376323522425977, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:32<20:15, 3.79s/it] 38%|███▊ | 200/520 [12:36<20:18, 3.81s/it] {'loss': 14.9001, 'grad_norm': 3.833293956701176e-05, 'learning_rate': 6.350791964087753, 
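Each step above is printed twice because tqdm redraws its progress bar in place; once the stream is captured to a log file the carriage returns are gone and every redraw survives as a duplicate. A minimal parser sketch that recovers one metrics row per optimizer step from a capture like this one (the regex assumes the `N/520 [...] {'loss': ...}` shape visible in these records; the log file name is a placeholder):

```python
import ast
import re

# One trainer record: "<step>/520 [elapsed<remaining, rate] {'loss': ..., 'epoch': ...}".
# The 520-step total is taken from the progress bars in this log.
RECORD = re.compile(r"(\d+)/520 \[[^\]]+\]\s*(\{'loss':[^}]+\})")

def parse_log(text: str) -> dict[int, dict]:
    """Map step -> metrics dict, collapsing duplicated tqdm redraws."""
    records = {}
    for step, payload in RECORD.findall(text):
        records[int(step)] = ast.literal_eval(payload)  # the dicts are Python reprs
    return records

# records = parse_log(open("masktune_ablation.log").read())  # placeholder path
# records[100]['loss'] -> 14.878, records[100]['learning_rate'] -> 8.397114317029974
```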
[steps 201–272 of 520, ~3.7–3.9 s/it; duplicates collapsed]
225/520 [14:11<19:09, 3.90s/it] {'loss': 14.8299, 'grad_norm': 2.8181470150897658e-05, 'learning_rate': 5.691757057148372, 'epoch': 0.43}
250/520 [15:45<16:43, 3.72s/it] {'loss': 14.6141, 'grad_norm': 5.7789324209698474e-05, 'learning_rate': 5.0038401424648855, 'epoch': 0.48}
268/520 [16:52<15:38, 3.72s/it] {'loss': 14.8648, 'grad_norm': 1.6631280768271456e-05, 'learning_rate': 4.5, 'epoch': 0.52}
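The logged learning rates trace a cosine decay with a short warmup: steps 184, 268, and 352 sit exactly 84 steps apart and print 6.75, 4.5, and (further down) 2.250000000000001, i.e. three-quarters, one-half, and one-quarter of a peak rate of 9.0. A sketch that reproduces the curve; the peak of 9.0 and the 16 warmup steps are inferred from these logged values, not read from any configuration:

```python
import math

TOTAL_STEPS = 520   # from the progress bars in this log
WARMUP_STEPS = 16   # inferred: makes the logged values land exactly on the curve
PEAK_LR = 9.0       # inferred from the logged rates, not from a config file

def cosine_lr(step: int) -> float:
    """Linear warmup, then cosine decay to zero over the remaining steps."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return 0.5 * PEAK_LR * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(184))  # ~6.75  (logged: 6.75)
print(cosine_lr(268))  # ~4.5   (logged: 4.5, the decay midpoint)
print(cosine_lr(352))  # ~2.25  (logged: 2.250000000000001)
```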
[steps 273–361 of 520; duplicates collapsed]
275/520 [17:18<15:09, 3.71s/it] {'loss': 14.714, 'grad_norm': 5.685699449806731e-05, 'learning_rate': 4.3037127568559885, 'epoch': 0.53}
300/520 [18:50<13:29, 3.68s/it] {'loss': 14.8116, 'grad_norm': 2.5147935001264352e-05, 'learning_rate': 3.6083423556027117, 'epoch': 0.58}
325/520 [20:24<12:09, 3.74s/it] {'loss': 14.7625, 'grad_norm': 6.079473463975533e-05, 'learning_rate': 2.9345811093664773, 'epoch': 0.62}
Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
348/520 [21:49<10:30, 3.67s/it] {'loss': 14.4499, 'grad_norm': 3.6049530318075674e-05, 'learning_rate': 2.3478570962040695, 'epoch': 0.67}
352/520 [22:03<10:17, 3.68s/it] {'loss': 14.8505, 'grad_norm': 2.0284541368595207e-05, 'learning_rate': 2.250000000000001, 'epoch': 0.68}
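The "Token indices sequence length is longer" line above is the tokenizer reporting that one sample encoded to 2778 tokens against a 2048-token model maximum; it warns but does not truncate, so the overlong sample passes through and must be handled downstream. A hedged sketch of the standard guard, assuming a Hugging Face tokenizer and the 2048 limit from the warning; the checkpoint id and `long_text` are stand-ins:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")  # assumed checkpoint id
long_text = "word " * 4000                                # stand-in overlong sample

# Without truncation the encoding may exceed the model maximum (2778 > 2048
# in this run) and only a warning is emitted:
ids = tok(long_text)["input_ids"]

# Clamping at encode time avoids indexing errors inside the model:
ids = tok(long_text, truncation=True, max_length=2048)["input_ids"]
assert len(ids) <= 2048
```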
[steps 362–446 of 520, ~3.7–4.1 s/it; duplicates collapsed]
375/520 [23:31<09:48, 4.06s/it] {'loss': 14.7174, 'grad_norm': 3.784567801887233e-05, 'learning_rate': 1.7162805780279533, 'epoch': 0.72}
400/520 [25:10<07:32, 3.77s/it] {'loss': 14.8248, 'grad_norm': 2.154527883496148e-05, 'learning_rate': 1.2012665767657824, 'epoch': 0.77}
425/520 [26:43<05:58, 3.77s/it] {'loss': 14.7426, 'grad_norm': 3.0429211052819406e-05, 'learning_rate': 0.7661967585098063, 'epoch': 0.82}
Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
446/520 [28:01<04:42, 3.82s/it] {'loss': 14.9245, 'grad_norm': 1.5359252130066438e-05, 'learning_rate': 0.4702953390421458, 'epoch': 0.86}
86%|████████▌ | 447/520 [28:05<04:39, 3.83s/it] {'loss': 14.6227, 'grad_norm': 2.2598236623398043e-05, 'learning_rate': 0.4578891903772018, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:05<04:39, 3.83s/it] 86%|████████▌ | 448/520 [28:09<04:36, 3.84s/it] {'loss': 14.7806, 'grad_norm': 3.177317742131525e-05, 'learning_rate': 0.44564009443911434, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:09<04:36, 3.84s/it] 86%|████████▋ | 449/520 [28:13<04:32, 3.84s/it] {'loss': 14.8176, 'grad_norm': 2.9079692953786966e-05, 'learning_rate': 0.43354852715593584, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:13<04:32, 3.84s/it] 87%|████████▋ | 450/520 [28:17<04:29, 3.84s/it] {'loss': 14.852, 'grad_norm': 4.9513616095258555e-05, 'learning_rate': 0.4216149583350753, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:17<04:29, 3.84s/it] 87%|████████▋ | 451/520 [28:21<04:25, 3.85s/it] {'loss': 14.6793, 'grad_norm': 6.703276626982968e-05, 'learning_rate': 0.40983985164505077, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:21<04:25, 3.85s/it] 87%|████████▋ | 452/520 [28:25<04:22, 3.85s/it] {'loss': 14.9261, 'grad_norm': 2.0919890841494628e-05, 'learning_rate': 0.3982236645974709, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:25<04:22, 3.85s/it] 87%|████████▋ | 453/520 [28:28<04:17, 3.84s/it] {'loss': 14.8336, 'grad_norm': 2.2127600532902188e-05, 'learning_rate': 0.38676684852925647, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:28<04:17, 3.84s/it] 87%|████████▋ | 454/520 [28:32<04:14, 3.86s/it] {'loss': 14.7366, 'grad_norm': 3.293192429011241e-05, 'learning_rate': 0.3754698485851071, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:32<04:14, 3.86s/it] 88%|████████▊ | 455/520 [28:36<04:11, 3.87s/it] {'loss': 14.8113, 'grad_norm': 2.8993328809710757e-05, 'learning_rate': 0.36433310370020705, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:36<04:11, 3.87s/it] 88%|████████▊ | 456/520 [28:40<04:07, 3.87s/it] {'loss': 14.8176, 'grad_norm': 2.6723967442945946e-05, 'learning_rate': 0.3533570465831652, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:40<04:07, 3.87s/it] 88%|████████▊ | 457/520 [28:44<04:04, 3.88s/it] {'loss': 14.7208, 'grad_norm': 3.990484184512646e-05, 'learning_rate': 0.3425421036992097, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:44<04:04, 3.88s/it] 88%|████████▊ | 458/520 [28:48<04:00, 3.87s/it] {'loss': 14.7485, 'grad_norm': 2.438129766008431e-05, 'learning_rate': 0.3318886952536111, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:48<04:00, 3.87s/it] 88%|████████▊ | 459/520 [28:52<03:55, 3.87s/it] {'loss': 14.787, 'grad_norm': 2.0103261011809172e-05, 'learning_rate': 0.321397235175359, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:52<03:55, 3.87s/it] 88%|████████▊ | 460/520 [28:56<03:52, 3.87s/it] {'loss': 14.5575, 'grad_norm': 2.6867142978273764e-05, 'learning_rate': 0.3110681311010814, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:56<03:52, 3.87s/it] 89%|████████▊ | 461/520 [28:59<03:48, 3.87s/it] {'loss': 15.0901, 'grad_norm': 1.4956461047458307e-05, 'learning_rate': 0.30090178435920073, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:59<03:48, 3.87s/it] 89%|████████▉ | 462/520 [29:03<03:43, 3.86s/it] {'loss': 14.9488, 'grad_norm': 1.2856747541318767e-05, 'learning_rate': 0.29089858995434703, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:03<03:43, 3.86s/it] 89%|████████▉ | 463/520 [29:07<03:39, 3.85s/it] {'loss': 14.5256, 'grad_norm': 4.846143211711264e-05, 'learning_rate': 0.2810589365520041, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:07<03:39, 3.85s/it] 89%|████████▉ | 464/520 [29:11<03:36, 3.86s/it] {'loss': 14.7608, 
'grad_norm': 2.0286286853235245e-05, 'learning_rate': 0.2713832064634126, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:11<03:36, 3.86s/it] 89%|████████▉ | 465/520 [29:15<03:32, 3.87s/it] {'loss': 14.9024, 'grad_norm': 1.9103103650221605e-05, 'learning_rate': 0.2618717756307144, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:15<03:32, 3.87s/it] 90%|████████▉ | 466/520 [29:19<03:28, 3.86s/it] {'loss': 14.8908, 'grad_norm': 1.9510725586466793e-05, 'learning_rate': 0.2525250136123459, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:19<03:28, 3.86s/it] 90%|████████▉ | 467/520 [29:23<03:25, 3.88s/it] {'loss': 14.8138, 'grad_norm': 1.848394300511631e-05, 'learning_rate': 0.2433432835686779, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:23<03:25, 3.88s/it] 90%|█████████ | 468/520 [29:27<03:21, 3.87s/it] {'loss': 14.5904, 'grad_norm': 1.980386564197031e-05, 'learning_rate': 0.23432694224790734, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:27<03:21, 3.87s/it] 90%|█████████ | 469/520 [29:30<03:18, 3.89s/it] {'loss': 14.856, 'grad_norm': 1.296860862031898e-05, 'learning_rate': 0.22547633997219302, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:30<03:18, 3.89s/it] 90%|█████████ | 470/520 [29:34<03:14, 3.88s/it] {'loss': 14.7238, 'grad_norm': 1.2124996044046202e-05, 'learning_rate': 0.2167918206240494, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:34<03:14, 3.88s/it] 91%|█████████ | 471/520 [29:38<03:09, 3.86s/it] {'loss': 14.6579, 'grad_norm': 1.781441057554886e-05, 'learning_rate': 0.2082737216329793, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:38<03:09, 3.86s/it] 91%|█████████ | 472/520 [29:42<03:04, 3.85s/it] {'loss': 14.672, 'grad_norm': 3.8639337582737386e-05, 'learning_rate': 0.19992237396236645, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:42<03:04, 3.85s/it] 91%|█████████ | 473/520 [29:46<03:00, 3.84s/it] {'loss': 14.6894, 'grad_norm': 1.5653202011594082e-05, 'learning_rate': 0.19173810209661868, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:46<03:00, 3.84s/it] 91%|█████████ | 474/520 [29:50<02:56, 3.84s/it] {'loss': 14.8531, 'grad_norm': 1.9419046718008187e-05, 'learning_rate': 0.18372122402855506, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:50<02:56, 3.84s/it] 91%|█████████▏| 475/520 [29:53<02:52, 3.84s/it] {'loss': 14.819, 'grad_norm': 1.708826008378615e-05, 'learning_rate': 0.1758720512470523, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:53<02:52, 3.84s/it] 92%|█████████▏| 476/520 [29:57<02:48, 3.82s/it] {'loss': 14.6527, 'grad_norm': 1.4256072939060999e-05, 'learning_rate': 0.16819088872494586, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:57<02:48, 3.82s/it] 92%|█████████▏| 477/520 [30:01<02:43, 3.81s/it] {'loss': 14.7521, 'grad_norm': 1.5709034430273514e-05, 'learning_rate': 0.16067803490717553, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:01<02:43, 3.81s/it] 92%|█████████▏| 478/520 [30:05<02:39, 3.80s/it] {'loss': 14.7476, 'grad_norm': 1.4162576840926634e-05, 'learning_rate': 0.1533337816991931, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:05<02:39, 3.80s/it] 92%|█████████▏| 479/520 [30:09<02:35, 3.80s/it] {'loss': 14.8082, 'grad_norm': 1.7537392754164123e-05, 'learning_rate': 0.1461584144556175, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:09<02:35, 3.80s/it] 92%|█████████▏| 480/520 [30:12<02:30, 3.77s/it] {'loss': 14.729, 'grad_norm': 1.3980311504236546e-05, 'learning_rate': 0.13915221196914968, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:12<02:30, 3.77s/it] 92%|█████████▎| 481/520 [30:16<02:26, 3.76s/it] {'loss': 14.8957, 'grad_norm': 1.2214332329051355e-05, 'learning_rate': 0.1323154464597407, 
'epoch': 0.93} + 92%|█████████▎| 481/520 [30:16<02:26, 3.76s/it] 93%|█████████▎| 482/520 [30:20<02:21, 3.74s/it] {'loss': 14.8368, 'grad_norm': 1.3739544117603401e-05, 'learning_rate': 0.12564838356401475, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:20<02:21, 3.74s/it] 93%|█████████▎| 483/520 [30:23<02:17, 3.71s/it] {'loss': 14.7836, 'grad_norm': 1.9042857913373502e-05, 'learning_rate': 0.11915128232494493, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:23<02:17, 3.71s/it] 93%|█████████▎| 484/520 [30:27<02:13, 3.70s/it] {'loss': 14.6555, 'grad_norm': 2.1202492784402122e-05, 'learning_rate': 0.11282439518179371, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:27<02:13, 3.70s/it] 93%|█████████▎| 485/520 [30:31<02:10, 3.73s/it] {'loss': 14.7059, 'grad_norm': 1.4955097813562085e-05, 'learning_rate': 0.10666796796029987, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:31<02:10, 3.73s/it] 93%|█████████▎| 486/520 [30:35<02:06, 3.72s/it] {'loss': 14.9073, 'grad_norm': 1.4272463646696304e-05, 'learning_rate': 0.10068223986312957, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:35<02:06, 3.72s/it] 94%|█████████▎| 487/520 [30:38<02:02, 3.72s/it] {'loss': 14.8045, 'grad_norm': 1.3716666408487035e-05, 'learning_rate': 0.09486744346058235, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:38<02:02, 3.72s/it] 94%|█████████▍| 488/520 [30:42<01:59, 3.73s/it] {'loss': 14.6079, 'grad_norm': 2.006692711976734e-05, 'learning_rate': 0.08922380468155278, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:42<01:59, 3.73s/it] 94%|█████████▍| 489/520 [30:46<01:55, 3.72s/it] {'loss': 14.932, 'grad_norm': 1.2905049778368392e-05, 'learning_rate': 0.08375154280475555, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:46<01:55, 3.72s/it] 94%|█████████▍| 490/520 [30:49<01:51, 3.71s/it] {'loss': 14.747, 'grad_norm': 1.469136277347849e-05, 'learning_rate': 0.07845087045020277, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:49<01:51, 3.71s/it] 94%|█████████▍| 491/520 [30:53<01:47, 3.71s/it] {'loss': 14.6663, 'grad_norm': 1.7122091727994303e-05, 'learning_rate': 0.07332199357094404, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:53<01:47, 3.71s/it] 95%|█████████▍| 492/520 [30:57<01:43, 3.69s/it] {'loss': 14.8949, 'grad_norm': 1.4790116524198486e-05, 'learning_rate': 0.06836511144506391, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:57<01:43, 3.69s/it] 95%|█████████▍| 493/520 [31:00<01:39, 3.69s/it] {'loss': 14.7354, 'grad_norm': 2.300816303226984e-05, 'learning_rate': 0.06358041666793851, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:00<01:39, 3.69s/it] 95%|█████████▌| 494/520 [31:04<01:36, 3.71s/it] {'loss': 14.8144, 'grad_norm': 1.8624651154321055e-05, 'learning_rate': 0.058968095144755095, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:04<01:36, 3.71s/it] 95%|█████████▌| 495/520 [31:08<01:32, 3.70s/it] {'loss': 14.8324, 'grad_norm': 1.7395924991242827e-05, 'learning_rate': 0.054528326083283785, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:08<01:32, 3.70s/it] 95%|█████████▌| 496/520 [31:12<01:28, 3.71s/it] {'loss': 14.6544, 'grad_norm': 2.382449471168203e-05, 'learning_rate': 0.050261281986921647, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:12<01:28, 3.71s/it] 96%|█████████▌| 497/520 [31:15<01:25, 3.71s/it] {'loss': 14.8515, 'grad_norm': 1.60410587626518e-05, 'learning_rate': 0.04616712864798306, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:15<01:25, 3.71s/it] 96%|█████████▌| 498/520 [31:19<01:21, 3.71s/it] {'loss': 14.7596, 'grad_norm': 1.8449066573756174e-05, 'learning_rate': 0.042246025141262356, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:19<01:21, 
3.71s/it] 96%|█████████▌| 499/520 [31:23<01:18, 3.72s/it] {'loss': 14.8061, 'grad_norm': 1.806396385997767e-05, 'learning_rate': 0.03849812381785328, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:23<01:18, 3.72s/it] 96%|█████████▌| 500/520 [31:26<01:14, 3.71s/it] {'loss': 14.6762, 'grad_norm': 1.8698961781617912e-05, 'learning_rate': 0.034923570299225715, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:26<01:14, 3.71s/it] 96%|█████████▋| 501/520 [31:30<01:10, 3.70s/it] {'loss': 14.8329, 'grad_norm': 2.0968669684616917e-05, 'learning_rate': 0.031522503471571706, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:30<01:10, 3.70s/it] 97%|█████████▋| 502/520 [31:34<01:06, 3.70s/it] {'loss': 14.8542, 'grad_norm': 1.832792673228475e-05, 'learning_rate': 0.028295055480408282, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:34<01:06, 3.70s/it] 97%|█████████▋| 503/520 [31:38<01:02, 3.69s/it] {'loss': 14.8183, 'grad_norm': 1.930501048290374e-05, 'learning_rate': 0.025241351725441064, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:38<01:02, 3.69s/it] 97%|█████████▋| 504/520 [31:41<00:59, 3.69s/it] {'loss': 14.6251, 'grad_norm': 2.3738579285612746e-05, 'learning_rate': 0.022361510855693656, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:41<00:59, 3.69s/it] 97%|█████████▋| 505/520 [31:45<00:55, 3.69s/it] {'loss': 14.7233, 'grad_norm': 1.9159951818678745e-05, 'learning_rate': 0.01965564476489784, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:45<00:55, 3.69s/it] 97%|█████████▋| 506/520 [31:49<00:51, 3.68s/it] {'loss': 14.7685, 'grad_norm': 2.512103078855758e-05, 'learning_rate': 0.017123858587145047, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:49<00:51, 3.68s/it] 98%|█████████▊| 507/520 [31:52<00:47, 3.69s/it] {'loss': 14.8888, 'grad_norm': 2.1736563498286162e-05, 'learning_rate': 0.01476625069280213, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:52<00:47, 3.69s/it] 98%|█████████▊| 508/520 [31:56<00:44, 3.68s/it] {'loss': 14.8232, 'grad_norm': 1.8303048825395276e-05, 'learning_rate': 0.012582912684689418, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:56<00:44, 3.68s/it] 98%|█████████▊| 509/520 [32:00<00:40, 3.67s/it] {'loss': 14.8537, 'grad_norm': 2.0777607700704995e-05, 'learning_rate': 0.010573929394520065, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:00<00:40, 3.67s/it] 98%|█████████▊| 510/520 [32:03<00:36, 3.67s/it] {'loss': 14.7269, 'grad_norm': 2.0071839181081556e-05, 'learning_rate': 0.008739378879606685, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:03<00:36, 3.67s/it] 98%|█████████▊| 511/520 [32:07<00:33, 3.68s/it] {'loss': 14.7689, 'grad_norm': 2.1607225431073634e-05, 'learning_rate': 0.007079332419825279, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:07<00:33, 3.68s/it] 98%|█████████▊| 512/520 [32:11<00:30, 3.77s/it] {'loss': 14.7697, 'grad_norm': 2.2550963048536323e-05, 'learning_rate': 0.00559385451484945, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:11<00:30, 3.77s/it] 99%|█████████▊| 513/520 [32:15<00:26, 3.80s/it] {'loss': 14.7662, 'grad_norm': 2.3072602497877586e-05, 'learning_rate': 0.004283002881639908, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:15<00:26, 3.80s/it] 99%|█████████▉| 514/520 [32:19<00:22, 3.81s/it] {'loss': 14.8669, 'grad_norm': 2.0107338994057992e-05, 'learning_rate': 0.003146828452206263, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:19<00:22, 3.81s/it] 99%|█████████▉| 515/520 [32:22<00:19, 3.82s/it] {'loss': 14.8461, 'grad_norm': 2.3469197899449315e-05, 'learning_rate': 0.0021853753716256086, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:22<00:19, 3.82s/it] 99%|█████████▉| 516/520 
[32:26<00:15, 3.83s/it] {'loss': 14.6871, 'grad_norm': 2.3447784951571994e-05, 'learning_rate': 0.0013986809963268954, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:26<00:15, 3.83s/it] 99%|█████████▉| 517/520 [32:30<00:11, 3.83s/it] {'loss': 14.9295, 'grad_norm': 2.0205851437201726e-05, 'learning_rate': 0.0007867758926410895, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:30<00:11, 3.83s/it] 100%|█████████▉| 518/520 [32:34<00:07, 3.83s/it] {'loss': 14.6358, 'grad_norm': 2.1279004859909086e-05, 'learning_rate': 0.00034968383561312377, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:34<00:07, 3.83s/it] 100%|█████████▉| 519/520 [32:38<00:03, 3.82s/it] {'loss': 14.7352, 'grad_norm': 2.0808651172785108e-05, 'learning_rate': 8.742180807813638e-05, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:38<00:03, 3.82s/it] 100%|██████████| 520/520 [32:43<00:00, 4.10s/it] {'loss': 14.8971, 'grad_norm': 1.9623380497867346e-05, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:43<00:00, 4.10s/it] {'train_runtime': 1963.0499, 'train_samples_per_second': 33.891, 'train_steps_per_second': 0.265, 'train_loss': 14.751332560410866, 'epoch': 1.0} + 100%|██████████| 520/520 [32:43<00:00, 4.10s/it] 100%|██████████| 520/520 [32:43<00:00, 3.78s/it] +[2025-10-10 09:15:22,376] [INFO] [launch.py:348:main] Process 1920194 exits successfully. +[2025-10-10 09:15:22,377] [INFO] [launch.py:348:main] Process 1920190 exits successfully. +[2025-10-10 09:15:23,378] [INFO] [launch.py:348:main] Process 1920188 exits successfully. +[2025-10-10 09:15:23,379] [INFO] [launch.py:348:main] Process 1920191 exits successfully. +[2025-10-10 09:15:23,379] [INFO] [launch.py:348:main] Process 1920189 exits successfully. +[2025-10-10 09:15:23,380] [INFO] [launch.py:348:main] Process 1920192 exits successfully. +[2025-10-10 09:15:23,380] [INFO] [launch.py:348:main] Process 1920193 exits successfully. +[2025-10-10 09:15:27,385] [INFO] [launch.py:348:main] Process 1920187 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251010_084110.log +Timestamp: 2025-10-10 09:15:29 +===================================== diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251009_043900.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251009_043900.log new file mode 100644 index 0000000000000000000000000000000000000000..fd95656a019459e3715299d743fad4b6ea469dc8 --- /dev/null +++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251009_043900.log @@ -0,0 +1,2314 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251009_043900.log +Timestamp: 2025-10-09 04:39:00 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. 
This is a one-time only operation. You can interrupt this and resume the migration later on by calling `transformers.utils.move_cache()`. + 0it [00:00, ?it/s] 0it [00:00, ?it/s] +[2025-10-09 04:39:03,567] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:06,577] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 04:39:06,578] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 04:39:09,224] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:10,295] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 04:39:10,295] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 04:39:10,296] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 04:39:10,296] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 04:39:10,296] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 04:39:10,296] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 04:39:10,296] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 04:39:10,298] [INFO] [launch.py:253:main] process 788956 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 04:39:10,300] [INFO] [launch.py:253:main] process 788957 spawned with command:
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 04:39:10,302] [INFO] [launch.py:253:main] process 788958 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', 
'/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 04:39:10,305] [INFO] [launch.py:253:main] process 788959 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', 
'--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 04:39:10,307] [INFO] [launch.py:253:main] process 788960 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 04:39:10,309] [INFO] [launch.py:253:main] process 788961 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', 
'/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 04:39:10,311] [INFO] [launch.py:253:main] process 788962 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', 
'1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 04:39:10,313] [INFO] [launch.py:253:main] process 788963 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', 
'--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 04:39:17,062] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:17,161] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:17,197] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:17,242] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:17,242] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:17,246] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:17,249] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:17,256] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 04:39:17,543] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 04:39:17,543] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-09 04:39:17,571] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 04:39:17,604] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 04:39:17,648] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 04:39:17,651] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 04:39:17,653] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 04:39:17,661] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 04:39:17,671] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:788956:788956 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788956:788956 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788956:788956 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:788956:788956 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:788956:788956 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:788956:788956 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:788957:788957 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:788957:788957 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788957:788957 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788961:788961 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:788957:788957 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:788961:788961 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788957:788957 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:788957:788957 [1] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test1-worker-0:788961:788961 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788961:788961 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:788961:788961 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:788961:788961 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:788962:788962 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:788962:788962 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788962:788962 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788962:788962 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:788962:788962 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:788962:788962 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:788958:788958 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:788958:788958 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788958:788958 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788958:788958 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:788958:788958 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:788958:788958 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:788959:788959 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:788959:788959 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788959:788959 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788959:788959 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:788959:788959 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:788959:788959 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:788963:788963 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:788963:788963 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788963:788963 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788963:788963 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:788963:788963 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:788963:788963 [7] NCCL INFO NET/Plugin: Using internal network plugin. 
+ywang29-vrdb-test1-worker-0:788960:788960 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:788960:788960 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788960:788960 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788960:788960 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:788960:788960 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:788960:788960 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO ncclCommInitRank comm 0x5561e90a3a10 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x3f32c50910475564 - Init START +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO ncclCommInitRank comm 0x558a43b34cc0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x3f32c50910475564 - Init START +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO ncclCommInitRank comm 0x557b8e7f95c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x3f32c50910475564 - Init START +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO ncclCommInitRank comm 0x55ec667177c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x3f32c50910475564 - Init START +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO ncclCommInitRank comm 0x558da02a75a0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x3f32c50910475564 - Init START +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO ncclCommInitRank comm 0x56362869b100 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x3f32c50910475564 - Init START +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO ncclCommInitRank comm 0x55b57605db80 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x3f32c50910475564 - Init START +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO ncclCommInitRank comm 0x56062b8ce900 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x3f32c50910475564 - Init START +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO 
NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO comm 0x557b8e7f95c0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO comm 0x56062b8ce900 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO comm 0x558a43b34cc0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO comm 0x558da02a75a0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO comm 0x55ec667177c0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO comm 0x5561e90a3a10 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO comm 0x56362869b100 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO comm 0x55b57605db80 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 
5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] 
NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:788960:790565 [4] NCCL INFO ncclCommInitRank comm 0x5561e90a3a10 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x3f32c50910475564 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788961:790558 [5] NCCL INFO ncclCommInitRank comm 0x558da02a75a0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x3f32c50910475564 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:788962:790561 [6] NCCL INFO ncclCommInitRank comm 0x558a43b34cc0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x3f32c50910475564 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:788963:790564 [7] NCCL INFO ncclCommInitRank comm 0x557b8e7f95c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x3f32c50910475564 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:788959:790563 [3] NCCL INFO ncclCommInitRank comm 0x56362869b100 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x3f32c50910475564 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788958:790562 [2] NCCL INFO ncclCommInitRank comm 0x55ec667177c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x3f32c50910475564 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788957:790559 [1] NCCL INFO ncclCommInitRank comm 0x55b57605db80 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x3f32c50910475564 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788956:790541 [0] NCCL INFO ncclCommInitRank comm 0x56062b8ce900 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x3f32c50910475564 - Init COMPLETE
+[2025-10-09 04:40:07,653] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores',
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-09 04:40:10,643] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-09 04:40:23,645 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-09 04:40:23,651 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters 
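(The eight identical "Randomly sampled" lines above are again one per rank, and the count is simply the floored fraction of the mixture: int(0.1 * 665298) = 66529. Because all ranks share the same seed, they draw the same subset. A quick check of that arithmetic; this is illustrative only, not the repo's sampling code:)

```python
import random

total, ratio, seed = 665298, 0.1, 42
k = int(total * ratio)
print(k)  # 66529, matching the log

# A fixed seed makes every rank select the same subset.
subset = random.Random(seed).sample(range(total), k)
print(len(subset))  # 66529
```

(That subset size also explains the `0/520` progress bar further down: at the run's global batch of 128, i.e. 4 per device x 8 GPUs x 4 accumulation steps, one epoch is ceil(66529 / 128) = 520 optimizer steps. The per-layer trainable-parameter list continues below.)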
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters 
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters 
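(Each entry in this trainable-parameter list, which concludes just below, is an exact score-matrix size: 802816 = 896 x 896, 114688 = 896 x 128, 4358144 = 896 x 4864, and for the connector 1032192 = 1152 x 896. Summed over the 24 decoder layers plus the two connector matrices, they reproduce the reported total; a short sanity check:)

```python
# Reconstruct "Total Trainable Parameters: 359661568" from the per-layer sizes.
hidden, kv_dim, inter, vision = 896, 128, 4864, 1152

per_layer = (
    2 * hidden * hidden    # q_proj + o_proj score matrices
    + 2 * hidden * kv_dim  # k_proj + v_proj score matrices
    + 3 * hidden * inter   # gate/up/down_proj score matrices
)                          # = 14909440

connector = vision * hidden + hidden * hidden  # the two connector score matrices

print(24 * per_layer + connector)  # 359661568
```

(The "Parameter Offload" line that follows the list is ZeRO-3 bookkeeping: the 486464 elements in 403 params are presumably the small tensors, such as biases and norms, kept persistently materialized on each rank rather than partitioned.)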
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:788957:795565 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL
INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788957:795565 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788958:795562 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788959:795564 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:788963:795566 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:788958:795562 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788962:795563 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788959:795564 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788963:795566 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788962:795563 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788960:795560 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:788961:795561 [5] NCCL INFO Trees [0] 
6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788961:795561 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788960:795560 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:788962:795563 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788957:795565 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788958:795562 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788961:795561 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788962:795563 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788963:795566 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788960:795560 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788959:795564 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+[NCCL ring/tree setup condensed: for channels 00/0 through 23/0, each rank n connects to rank n+1 via P2P/CUMEM/read, with 7[7] -> 0[0] closing the ring; all eight ranks report "Connected all rings", then open the reverse path n -> n-1 on the same 24 channels; after "Connected all trees", each rank logs "threadThresholds 8/8/64 | 64/8/64 | 512 | 512" and "24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer".]
+ywang29-vrdb-test1-worker-0:788959:795564 [3] NCCL INFO ncclCommInitRank comm 0x7f149406b1f0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xd0501a2a7826030e - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788957:795565 [1] NCCL INFO ncclCommInitRank comm 0x7f851406a980 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xd0501a2a7826030e - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788962:795563 [6] NCCL INFO ncclCommInitRank comm 0x7fa99806b220 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xd0501a2a7826030e - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788961:795561 [5] NCCL INFO ncclCommInitRank comm 0x7f928806b250 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xd0501a2a7826030e - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788963:795566 [7] NCCL INFO ncclCommInitRank comm 0x7f8bd006a950 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xd0501a2a7826030e - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788958:795562 [2] NCCL INFO ncclCommInitRank comm 0x7f5a2406aaf0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xd0501a2a7826030e - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788956:795559 [0] NCCL INFO ncclCommInitRank comm 0x7f5cd406b0d0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xd0501a2a7826030e - Init COMPLETE
+ywang29-vrdb-test1-worker-0:788960:795560 [4] NCCL INFO ncclCommInitRank comm 0x7fc36c06a820 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xd0501a2a7826030e - Init COMPLETE
+ 0%| | 1/520 [00:12<1:47:45, 12.46s/it] {'loss': 2.0453, 'grad_norm': 0.004835272375227129, 'learning_rate': 0.0625, 'epoch': 0.0}
+ 0%| | 1/520 [00:12<1:47:45, 12.46s/it] 0%| | 2/520 [00:16<1:02:48, 7.27s/it] {'loss': 2.0549, 'grad_norm': 0.005248487271927371, 'learning_rate': 0.125, 'epoch': 0.0}
+ 0%| | 2/520 [00:16<1:02:48, 7.27s/it] 1%| | 3/520 [00:19<48:36, 5.64s/it] {'loss': 2.1899, 'grad_norm': 0.006006736708343085, 'learning_rate': 0.1875, 'epoch': 0.01}
+ 1%| | 3/520 [00:19<48:36, 5.64s/it] 1%| | 4/520 [00:23<41:41, 4.85s/it] {'loss': 1.6704, 'grad_norm': 0.0015019104983165386, 'learning_rate': 0.25, 'epoch': 0.01}
+ 1%| | 4/520 [00:23<41:41, 4.85s/it] 1%| | 5/520 [00:27<38:05, 4.44s/it] {'loss': 1.6621, 'grad_norm': 0.0008069400809621924, 'learning_rate': 0.3125, 'epoch': 0.01}
+ 1%| | 5/520 [00:27<38:05, 4.44s/it] 1%| | 6/520 [00:31<36:25, 4.25s/it] {'loss': 1.3803, 'grad_norm': 0.0006378539578018832, 'learning_rate': 0.375, 'epoch': 0.01}
+ 1%| | 6/520 [00:31<36:25, 4.25s/it] 1%|▏ | 7/520 [00:34<35:26, 4.15s/it] {'loss': 1.4158, 'grad_norm': 0.0009172625331467647, 'learning_rate': 0.4375, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:34<35:26, 4.15s/it] 2%|▏ | 8/520 [00:39<36:13, 4.24s/it] {'loss': 1.4584, 'grad_norm': 
0.0011323686825777705, 'learning_rate': 0.5, 'epoch': 0.02} + 2%|▏ | 8/520 [00:39<36:13, 4.24s/it] 2%|▏ | 9/520 [00:43<36:28, 4.28s/it] {'loss': 1.5345, 'grad_norm': 0.0020828183246376767, 'learning_rate': 0.5625, 'epoch': 0.02} + 2%|▏ | 9/520 [00:43<36:28, 4.28s/it] 2%|▏ | 10/520 [00:47<35:17, 4.15s/it] {'loss': 1.405, 'grad_norm': 0.0030884085702398215, 'learning_rate': 0.625, 'epoch': 0.02} + 2%|▏ | 10/520 [00:47<35:17, 4.15s/it] 2%|▏ | 11/520 [00:51<34:37, 4.08s/it] {'loss': 1.568, 'grad_norm': 0.004100927719405666, 'learning_rate': 0.6875, 'epoch': 0.02} + 2%|▏ | 11/520 [00:51<34:37, 4.08s/it] 2%|▏ | 12/520 [00:55<34:01, 4.02s/it] {'loss': 1.6142, 'grad_norm': 0.006958883413410033, 'learning_rate': 0.75, 'epoch': 0.02} + 2%|▏ | 12/520 [00:55<34:01, 4.02s/it][2025-10-09 04:41:28,575] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [00:59<35:17, 4.18s/it] {'loss': 2.1965, 'grad_norm': 0.024390862657224364, 'learning_rate': 0.8125, 'epoch': 0.03} + 2%|▎ | 13/520 [00:59<35:17, 4.18s/it] 3%|▎ | 14/520 [01:03<34:22, 4.08s/it] {'loss': 2.7592, 'grad_norm': 0.03942335343533018, 'learning_rate': 0.875, 'epoch': 0.03} + 3%|▎ | 14/520 [01:03<34:22, 4.08s/it] 3%|▎ | 15/520 [01:07<33:53, 4.03s/it] {'loss': 3.8028, 'grad_norm': 0.1261602106420422, 'learning_rate': 0.9375, 'epoch': 0.03} + 3%|▎ | 15/520 [01:07<33:53, 4.03s/it] 3%|▎ | 16/520 [01:11<33:15, 3.96s/it] {'loss': 6.7193, 'grad_norm': 0.5841927783865534, 'learning_rate': 1.0, 'epoch': 0.03} + 3%|▎ | 16/520 [01:11<33:15, 3.96s/it] 3%|▎ | 17/520 [01:15<33:04, 3.95s/it] {'loss': 4.7547, 'grad_norm': 0.08624163450780457, 'learning_rate': 0.999990286465769, 'epoch': 0.03} + 3%|▎ | 17/520 [01:15<33:04, 3.95s/it] 3%|▎ | 18/520 [01:19<32:43, 3.91s/it] {'loss': 4.6885, 'grad_norm': 0.17425933429761314, 'learning_rate': 0.9999611462404874, 'epoch': 0.03} + 3%|▎ | 18/520 [01:19<32:43, 3.91s/it] 4%|▎ | 19/520 [01:23<32:36, 3.91s/it] {'loss': 15.5456, 'grad_norm': 0.6764792027821689, 'learning_rate': 0.9999125804563732, 'epoch': 0.04} + 4%|▎ | 19/520 [01:23<32:36, 3.91s/it] 4%|▍ | 20/520 [01:27<32:24, 3.89s/it] {'loss': 8.2973, 'grad_norm': 0.21668440765956298, 'learning_rate': 0.9998445910004081, 'epoch': 0.04} + 4%|▍ | 20/520 [01:27<32:24, 3.89s/it] 4%|▍ | 21/520 [01:30<32:22, 3.89s/it] {'loss': 6.106, 'grad_norm': 0.07472585918973111, 'learning_rate': 0.9997571805142638, 'epoch': 0.04} + 4%|▍ | 21/520 [01:30<32:22, 3.89s/it] 4%|▍ | 22/520 [01:34<32:11, 3.88s/it] {'loss': 4.8756, 'grad_norm': 0.03796012812346558, 'learning_rate': 0.9996503523941993, 'epoch': 0.04} + 4%|▍ | 22/520 [01:34<32:11, 3.88s/it] 4%|▍ | 23/520 [01:38<32:14, 3.89s/it] {'loss': 4.2556, 'grad_norm': 0.027276086517386697, 'learning_rate': 0.999524110790929, 'epoch': 0.04} + 4%|▍ | 23/520 [01:38<32:14, 3.89s/it] 5%|▍ | 24/520 [01:42<32:00, 3.87s/it] {'loss': 4.611, 'grad_norm': 0.02607834502448889, 'learning_rate': 0.9993784606094611, 'epoch': 0.05} + 5%|▍ | 24/520 [01:42<32:00, 3.87s/it] 5%|▍ | 25/520 [01:46<32:03, 3.89s/it] {'loss': 3.8572, 'grad_norm': 0.020794573699020324, 'learning_rate': 0.9992134075089083, 'epoch': 0.05} + 5%|▍ | 25/520 [01:46<32:03, 3.89s/it] 5%|▌ | 
26/520 [01:50<31:51, 3.87s/it] {'loss': 3.2606, 'grad_norm': 0.007139278707968689, 'learning_rate': 0.999028957902266, 'epoch': 0.05} + 5%|▌ | 26/520 [01:50<31:51, 3.87s/it] 5%|▌ | 27/520 [01:53<31:11, 3.80s/it] {'loss': 2.8077, 'grad_norm': 0.005809097492234402, 'learning_rate': 0.9988251189561644, 'epoch': 0.05} + 5%|▌ | 27/520 [01:53<31:11, 3.80s/it] 5%|▌ | 28/520 [01:57<30:54, 3.77s/it] {'loss': 2.5452, 'grad_norm': 0.003439889027467958, 'learning_rate': 0.99860189859059, 'epoch': 0.05} + 5%|▌ | 28/520 [01:57<30:54, 3.77s/it] 6%|▌ | 29/520 [02:01<30:49, 3.77s/it] {'loss': 2.5141, 'grad_norm': 0.004070622216848408, 'learning_rate': 0.9983593054785775, 'epoch': 0.06} + 6%|▌ | 29/520 [02:01<30:49, 3.77s/it] 6%|▌ | 30/520 [02:05<30:35, 3.75s/it] {'loss': 3.2761, 'grad_norm': 0.007234681720696524, 'learning_rate': 0.9980973490458728, 'epoch': 0.06} + 6%|▌ | 30/520 [02:05<30:35, 3.75s/it] 6%|▌ | 31/520 [02:08<30:14, 3.71s/it] {'loss': 2.45, 'grad_norm': 0.0024294567051227726, 'learning_rate': 0.9978160394705669, 'epoch': 0.06} + 6%|▌ | 31/520 [02:08<30:14, 3.71s/it] 6%|▌ | 32/520 [02:12<30:17, 3.72s/it] {'loss': 3.6819, 'grad_norm': 0.00572257685673942, 'learning_rate': 0.9975153876827008, 'epoch': 0.06} + 6%|▌ | 32/520 [02:12<30:17, 3.72s/it] 6%|▋ | 33/520 [02:16<29:58, 3.69s/it] {'loss': 2.3589, 'grad_norm': 0.002810385794415528, 'learning_rate': 0.9971954053638399, 'epoch': 0.06} + 6%|▋ | 33/520 [02:16<29:58, 3.69s/it] 7%|▋ | 34/520 [02:19<29:54, 3.69s/it] {'loss': 2.2541, 'grad_norm': 0.0021582911914928868, 'learning_rate': 0.9968561049466214, 'epoch': 0.07} + 7%|▋ | 34/520 [02:19<29:54, 3.69s/it] 7%|▋ | 35/520 [02:23<29:39, 3.67s/it] {'loss': 2.2331, 'grad_norm': 0.0020577287546921686, 'learning_rate': 0.9964974996142697, 'epoch': 0.07} + 7%|▋ | 35/520 [02:23<29:39, 3.67s/it] 7%|▋ | 36/520 [02:27<29:36, 3.67s/it] {'loss': 2.3675, 'grad_norm': 0.0017206971209472398, 'learning_rate': 0.9961196033000861, 'epoch': 0.07} + 7%|▋ | 36/520 [02:27<29:36, 3.67s/it] 7%|▋ | 37/520 [02:30<29:31, 3.67s/it] {'loss': 2.69, 'grad_norm': 0.004072554340562795, 'learning_rate': 0.9957224306869052, 'epoch': 0.07} + 7%|▋ | 37/520 [02:30<29:31, 3.67s/it] 7%|▋ | 38/520 [02:34<29:30, 3.67s/it] {'loss': 2.3743, 'grad_norm': 0.0016267095613669111, 'learning_rate': 0.9953059972065263, 'epoch': 0.07} + 7%|▋ | 38/520 [02:34<29:30, 3.67s/it] 8%|▊ | 39/520 [02:38<29:54, 3.73s/it] {'loss': 2.2059, 'grad_norm': 0.0017474275965813546, 'learning_rate': 0.994870319039113, 'epoch': 0.07} + 8%|▊ | 39/520 [02:38<29:54, 3.73s/it] 8%|▊ | 40/520 [02:42<30:03, 3.76s/it] {'loss': 2.1504, 'grad_norm': 0.001427871081932903, 'learning_rate': 0.9944154131125642, 'epoch': 0.08} + 8%|▊ | 40/520 [02:42<30:03, 3.76s/it] 8%|▊ | 41/520 [02:45<29:58, 3.75s/it] {'loss': 2.1366, 'grad_norm': 0.0017442265127798383, 'learning_rate': 0.9939412971018573, 'epoch': 0.08} + 8%|▊ | 41/520 [02:45<29:58, 3.75s/it] 8%|▊ | 42/520 [02:49<30:00, 3.77s/it] {'loss': 2.2908, 'grad_norm': 0.0022299699188325223, 'learning_rate': 0.9934479894283605, 'epoch': 0.08} + 8%|▊ | 42/520 [02:49<30:00, 3.77s/it] 8%|▊ | 43/520 [02:53<29:41, 3.73s/it] {'loss': 2.4689, 'grad_norm': 0.0021970404766106753, 'learning_rate': 0.9929355092591179, 'epoch': 0.08} + 8%|▊ | 43/520 [02:53<29:41, 3.73s/it] 8%|▊ | 44/520 [02:56<29:28, 3.72s/it] {'loss': 2.5904, 'grad_norm': 0.0025433410851968125, 'learning_rate': 0.9924038765061041, 'epoch': 0.08} + 8%|▊ | 44/520 [02:56<29:28, 3.72s/it] 9%|▊ | 45/520 [03:00<29:21, 3.71s/it] {'loss': 2.0772, 'grad_norm': 0.001273222463327911, 
'learning_rate': 0.9918531118254507, 'epoch': 0.09} + 9%|▊ | 45/520 [03:00<29:21, 3.71s/it] 9%|▉ | 46/520 [03:04<29:12, 3.70s/it] {'loss': 2.6148, 'grad_norm': 0.0016372828763168694, 'learning_rate': 0.9912832366166442, 'epoch': 0.09} + 9%|▉ | 46/520 [03:04<29:12, 3.70s/it] 9%|▉ | 47/520 [03:08<29:09, 3.70s/it] {'loss': 2.1307, 'grad_norm': 0.0013991099046677207, 'learning_rate': 0.9906942730216939, 'epoch': 0.09} + 9%|▉ | 47/520 [03:08<29:09, 3.70s/it] 9%|▉ | 48/520 [03:11<29:04, 3.69s/it] {'loss': 2.0834, 'grad_norm': 0.0013263260626432028, 'learning_rate': 0.9900862439242719, 'epoch': 0.09} + 9%|▉ | 48/520 [03:11<29:04, 3.69s/it] 9%|▉ | 49/520 [03:15<28:53, 3.68s/it] {'loss': 2.0485, 'grad_norm': 0.001309625282479708, 'learning_rate': 0.9894591729488242, 'epoch': 0.09} + 9%|▉ | 49/520 [03:15<28:53, 3.68s/it] 10%|▉ | 50/520 [03:19<28:51, 3.68s/it] {'loss': 2.0415, 'grad_norm': 0.0012337706969645487, 'learning_rate': 0.9888130844596523, 'epoch': 0.1} + 10%|▉ | 50/520 [03:19<28:51, 3.68s/it] 10%|▉ | 51/520 [03:22<28:48, 3.68s/it] {'loss': 1.9337, 'grad_norm': 0.001235084386475081, 'learning_rate': 0.9881480035599667, 'epoch': 0.1} + 10%|▉ | 51/520 [03:22<28:48, 3.68s/it] 10%|█ | 52/520 [03:26<28:48, 3.69s/it] {'loss': 2.0989, 'grad_norm': 0.0012951544305095557, 'learning_rate': 0.9874639560909118, 'epoch': 0.1} + 10%|█ | 52/520 [03:26<28:48, 3.69s/it] 10%|█ | 53/520 [03:30<28:42, 3.69s/it] {'loss': 2.0975, 'grad_norm': 0.001237385410783805, 'learning_rate': 0.9867609686305616, 'epoch': 0.1} + 10%|█ | 53/520 [03:30<28:42, 3.69s/it] 10%|█ | 54/520 [03:33<28:47, 3.71s/it] {'loss': 1.9161, 'grad_norm': 0.0011041805243291374, 'learning_rate': 0.9860390684928872, 'epoch': 0.1} + 10%|█ | 54/520 [03:33<28:47, 3.71s/it] 11%|█ | 55/520 [03:37<28:41, 3.70s/it] {'loss': 1.9198, 'grad_norm': 0.0011669999915347605, 'learning_rate': 0.9852982837266955, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<28:41, 3.70s/it] 11%|█ | 56/520 [03:41<28:32, 3.69s/it] {'loss': 2.069, 'grad_norm': 0.0010981950095947381, 'learning_rate': 0.984538643114539, 'epoch': 0.11} + 11%|█ | 56/520 [03:41<28:32, 3.69s/it] 11%|█ | 57/520 [03:45<28:45, 3.73s/it] {'loss': 1.9092, 'grad_norm': 0.0011366956808548962, 'learning_rate': 0.9837601761715982, 'epoch': 0.11} + 11%|█ | 57/520 [03:45<28:45, 3.73s/it] 11%|█ | 58/520 [03:48<29:07, 3.78s/it] {'loss': 2.0295, 'grad_norm': 0.0009131660108304226, 'learning_rate': 0.9829629131445341, 'epoch': 0.11} + 11%|█ | 58/520 [03:48<29:07, 3.78s/it] 11%|█▏ | 59/520 [03:52<29:21, 3.82s/it] {'loss': 2.1883, 'grad_norm': 0.0012064406612611695, 'learning_rate': 0.9821468850103139, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:52<29:21, 3.82s/it] 12%|█▏ | 60/520 [03:56<29:27, 3.84s/it] {'loss': 1.9827, 'grad_norm': 0.0009637728384511795, 'learning_rate': 0.981312123475006, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:56<29:27, 3.84s/it] 12%|█▏ | 61/520 [04:00<29:31, 3.86s/it] {'loss': 2.3848, 'grad_norm': 0.0011979484626825647, 'learning_rate': 0.9804586609725499, 'epoch': 0.12} + 12%|█▏ | 61/520 [04:00<29:31, 3.86s/it] 12%|█▏ | 62/520 [04:04<29:28, 3.86s/it] {'loss': 1.9046, 'grad_norm': 0.0010804184116681133, 'learning_rate': 0.9795865306634939, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:04<29:28, 3.86s/it] 12%|█▏ | 63/520 [04:08<29:24, 3.86s/it] {'loss': 1.9679, 'grad_norm': 0.0009778006831120975, 'learning_rate': 0.978695766433709, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:08<29:24, 3.86s/it] 12%|█▏ | 64/520 [04:12<28:49, 3.79s/it] {'loss': 1.9762, 'grad_norm': 0.0010167650126329754, 'learning_rate': 0.9777864028930704, 'epoch': 
0.12} + 12%|█▏ | 64/520 [04:12<28:49, 3.79s/it] 12%|█▎ | 65/520 [04:15<28:37, 3.78s/it] {'loss': 1.9988, 'grad_norm': 0.001070346078389618, 'learning_rate': 0.9768584753741134, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:15<28:37, 3.78s/it] 13%|█▎ | 66/520 [04:19<28:25, 3.76s/it] {'loss': 1.949, 'grad_norm': 0.0010117332506036083, 'learning_rate': 0.9759120199306612, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:19<28:25, 3.76s/it] 13%|█▎ | 67/520 [04:23<28:14, 3.74s/it] {'loss': 1.7505, 'grad_norm': 0.0009559299591683861, 'learning_rate': 0.9749470733364229, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:23<28:14, 3.74s/it] 13%|█▎ | 68/520 [04:26<28:05, 3.73s/it] {'loss': 1.7665, 'grad_norm': 0.0008936603826535514, 'learning_rate': 0.9739636730835659, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:26<28:05, 3.73s/it] 13%|█▎ | 69/520 [04:30<27:56, 3.72s/it] {'loss': 1.7906, 'grad_norm': 0.0009808048777068016, 'learning_rate': 0.972961857381258, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:30<27:56, 3.72s/it] 13%|█▎ | 70/520 [04:34<27:47, 3.71s/it] {'loss': 1.8641, 'grad_norm': 0.0010930119400331898, 'learning_rate': 0.9719416651541838, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:34<27:47, 3.71s/it] 14%|█▎ | 71/520 [04:37<27:42, 3.70s/it] {'loss': 1.7219, 'grad_norm': 0.0009150745182447584, 'learning_rate': 0.9709031360410317, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:37<27:42, 3.70s/it] 14%|█▍ | 72/520 [04:41<27:34, 3.69s/it] {'loss': 1.8996, 'grad_norm': 0.0009286892067452192, 'learning_rate': 0.9698463103929542, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:41<27:34, 3.69s/it] 14%|█▍ | 73/520 [04:45<27:33, 3.70s/it] {'loss': 1.6959, 'grad_norm': 0.0009100695058240425, 'learning_rate': 0.9687712292719997, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:45<27:33, 3.70s/it] 14%|█▍ | 74/520 [04:49<27:36, 3.71s/it] {'loss': 1.8551, 'grad_norm': 0.0010478984935629637, 'learning_rate': 0.967677934449517, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:49<27:36, 3.71s/it] 14%|█▍ | 75/520 [04:52<27:32, 3.71s/it] {'loss': 1.7072, 'grad_norm': 0.0009216251572571288, 'learning_rate': 0.9665664684045332, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:52<27:32, 3.71s/it] 15%|█▍ | 76/520 [04:56<27:28, 3.71s/it] {'loss': 2.2838, 'grad_norm': 0.0010518673352381493, 'learning_rate': 0.9654368743221021, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:56<27:28, 3.71s/it] 15%|█▍ | 77/520 [05:00<27:31, 3.73s/it] {'loss': 1.6835, 'grad_norm': 0.0011137151261848073, 'learning_rate': 0.9642891960916268, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:00<27:31, 3.73s/it] 15%|█▌ | 78/520 [05:03<27:22, 3.72s/it] {'loss': 1.8092, 'grad_norm': 0.0009277119580631131, 'learning_rate': 0.9631234783051543, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:03<27:22, 3.72s/it] 15%|█▌ | 79/520 [05:07<27:14, 3.71s/it] {'loss': 1.755, 'grad_norm': 0.000821831172798817, 'learning_rate': 0.9619397662556434, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:07<27:14, 3.71s/it] 15%|█▌ | 80/520 [05:11<27:14, 3.72s/it] {'loss': 2.2871, 'grad_norm': 0.0010217082436103754, 'learning_rate': 0.9607381059352038, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:11<27:14, 3.72s/it] 16%|█▌ | 81/520 [05:15<27:08, 3.71s/it] {'loss': 2.0063, 'grad_norm': 0.0013263849600011392, 'learning_rate': 0.9595185440333103, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:15<27:08, 3.71s/it] 16%|█▌ | 82/520 [05:19<27:36, 3.78s/it] {'loss': 1.8466, 'grad_norm': 0.0008453027780764904, 'learning_rate': 0.9582811279349881, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:19<27:36, 3.78s/it] 16%|█▌ | 83/520 [05:23<28:20, 3.89s/it] {'loss': 1.8885, 'grad_norm': 0.0008505255235118423, 'learning_rate': 0.9570259057189716, 'epoch': 0.16} + 
16%|█▌ | 83/520 [05:23<28:20, 3.89s/it] 16%|█▌ | 84/520 [05:27<28:14, 3.89s/it] {'loss': 1.8528, 'grad_norm': 0.0008792838657149599, 'learning_rate': 0.9557529261558366, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:27<28:14, 3.89s/it] 16%|█▋ | 85/520 [05:30<28:00, 3.86s/it] {'loss': 1.8647, 'grad_norm': 0.0008646634906934699, 'learning_rate': 0.9544622387061055, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:30<28:00, 3.86s/it] 17%|█▋ | 86/520 [05:34<27:29, 3.80s/it] {'loss': 1.9655, 'grad_norm': 0.0008719719159763047, 'learning_rate': 0.953153893518325, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:34<27:29, 3.80s/it] 17%|█▋ | 87/520 [05:38<27:08, 3.76s/it] {'loss': 2.2367, 'grad_norm': 0.001012229645034807, 'learning_rate': 0.9518279414271184, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:38<27:08, 3.76s/it] 17%|█▋ | 88/520 [05:41<26:52, 3.73s/it] {'loss': 2.4188, 'grad_norm': 0.001000579030540645, 'learning_rate': 0.9504844339512095, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:41<26:52, 3.73s/it] 17%|█▋ | 89/520 [05:45<26:35, 3.70s/it] {'loss': 1.8266, 'grad_norm': 0.000832065832487258, 'learning_rate': 0.9491234232914221, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:45<26:35, 3.70s/it] 17%|█▋ | 90/520 [05:49<26:40, 3.72s/it] {'loss': 1.7419, 'grad_norm': 0.0008251051840585088, 'learning_rate': 0.9477449623286505, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:49<26:40, 3.72s/it] 18%|█▊ | 91/520 [05:52<26:26, 3.70s/it] {'loss': 1.8336, 'grad_norm': 0.000754301581112783, 'learning_rate': 0.9463491046218058, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:52<26:26, 3.70s/it] 18%|█▊ | 92/520 [05:56<26:16, 3.68s/it] {'loss': 1.762, 'grad_norm': 0.0008484628098631748, 'learning_rate': 0.9449359044057344, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:56<26:16, 3.68s/it] 18%|█▊ | 93/520 [06:00<26:12, 3.68s/it] {'loss': 1.7291, 'grad_norm': 0.0008255219269528636, 'learning_rate': 0.9435054165891108, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:00<26:12, 3.68s/it] 18%|█▊ | 94/520 [06:03<26:00, 3.66s/it] {'loss': 1.8896, 'grad_norm': 0.0008239378758012931, 'learning_rate': 0.9420576967523049, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:03<26:00, 3.66s/it] 18%|█▊ | 95/520 [06:07<26:08, 3.69s/it] {'loss': 1.7368, 'grad_norm': 0.0008895934223454412, 'learning_rate': 0.9405928011452211, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:07<26:08, 3.69s/it] 18%|█▊ | 96/520 [06:11<26:04, 3.69s/it] {'loss': 1.7434, 'grad_norm': 0.0007513113558068938, 'learning_rate': 0.9391107866851143, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:11<26:04, 3.69s/it] 19%|█▊ | 97/520 [06:15<26:12, 3.72s/it] {'loss': 1.7181, 'grad_norm': 0.0008495634288120295, 'learning_rate': 0.9376117109543769, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:15<26:12, 3.72s/it] 19%|█▉ | 98/520 [06:18<26:04, 3.71s/it] {'loss': 1.7261, 'grad_norm': 0.0006979848994597109, 'learning_rate': 0.9360956321983027, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:18<26:04, 3.71s/it] 19%|█▉ | 99/520 [06:22<25:54, 3.69s/it] {'loss': 1.7375, 'grad_norm': 0.0007482824923381229, 'learning_rate': 0.9345626093228232, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:22<25:54, 3.69s/it] 19%|█▉ | 100/520 [06:26<25:51, 3.69s/it] {'loss': 2.0203, 'grad_norm': 0.0008331518747274918, 'learning_rate': 0.9330127018922194, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:26<25:51, 3.69s/it] 19%|█▉ | 101/520 [06:29<25:44, 3.69s/it] {'loss': 1.7397, 'grad_norm': 0.0007684989962245398, 'learning_rate': 0.9314459701268065, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:29<25:44, 3.69s/it] 20%|█▉ | 102/520 [06:33<25:34, 3.67s/it] {'loss': 1.722, 'grad_norm': 0.0007893228574660739, 'learning_rate': 0.9298624749005951, 'epoch': 0.2} + 
+ 103/520 [06:37<25:31, 3.67s/it] {'loss': 1.6747, 'grad_norm': 0.0008497646556008556, 'learning_rate': 0.9282622777389258, 'epoch': 0.2}
+ 104/520 [06:40<25:33, 3.69s/it] {'loss': 1.7601, 'grad_norm': 0.0007897387235498328, 'learning_rate': 0.9266454408160778, 'epoch': 0.2}
+ 105/520 [06:44<25:27, 3.68s/it] {'loss': 1.7252, 'grad_norm': 0.0007500741397721794, 'learning_rate': 0.9250120269528546, 'epoch': 0.2}
+ 106/520 [06:48<25:24, 3.68s/it] {'loss': 2.0427, 'grad_norm': 0.000974118984188845, 'learning_rate': 0.9233620996141421, 'epoch': 0.2}
+ 107/520 [06:51<25:26, 3.70s/it] {'loss': 2.0495, 'grad_norm': 0.0008306181847952827, 'learning_rate': 0.9216957229064429, 'epoch': 0.21}
+ 108/520 [06:55<25:31, 3.72s/it] {'loss': 1.6707, 'grad_norm': 0.0007623777416332712, 'learning_rate': 0.9200129615753858, 'epoch': 0.21}
+ 109/520 [06:59<25:45, 3.76s/it] {'loss': 2.0213, 'grad_norm': 0.0007922660910369982, 'learning_rate': 0.9183138810032099, 'epoch': 0.21}
+ 110/520 [07:03<25:48, 3.78s/it] {'loss': 1.8741, 'grad_norm': 0.0007485503918082569, 'learning_rate': 0.9165985472062245, 'epoch': 0.21}
+ 111/520 [07:07<25:51, 3.79s/it] {'loss': 1.9137, 'grad_norm': 0.0007695347964851623, 'learning_rate': 0.9148670268322437, 'epoch': 0.21}
+ 112/520 [07:10<25:42, 3.78s/it] {'loss': 1.7844, 'grad_norm': 0.000727353821942631, 'learning_rate': 0.9131193871579975, 'epoch': 0.22}
+ 113/520 [07:14<25:31, 3.76s/it] {'loss': 1.6027, 'grad_norm': 0.0006926179410792782, 'learning_rate': 0.9113556960865167, 'epoch': 0.22}
+ 114/520 [07:18<25:24, 3.75s/it] {'loss': 1.7499, 'grad_norm': 0.0007025682564559461, 'learning_rate': 0.909576022144496, 'epoch': 0.22}
+ 115/520 [07:22<25:14, 3.74s/it] {'loss': 1.9028, 'grad_norm': 0.0007658564043253811, 'learning_rate': 0.9077804344796301, 'epoch': 0.22}
+ 116/520 [07:25<25:10, 3.74s/it] {'loss': 1.8524, 'grad_norm': 0.000760477857135314, 'learning_rate': 0.9059690028579284, 'epoch': 0.22}
+ 117/520 [07:29<25:08, 3.74s/it] {'loss': 1.8483, 'grad_norm': 0.0007530331905537078, 'learning_rate': 0.9041417976610027, 'epoch': 0.23}
+ 118/520 [07:33<24:52, 3.71s/it] {'loss': 1.6866, 'grad_norm': 0.0006514606584855413, 'learning_rate': 0.9022988898833342, 'epoch': 0.23}
+ 119/520 [07:36<24:57, 3.73s/it] {'loss': 1.6277, 'grad_norm': 0.0007031125622710394, 'learning_rate': 0.900440351129514, 'epoch': 0.23}
+ 120/520 [07:40<24:51, 3.73s/it] {'loss': 1.6718, 'grad_norm': 0.0008274203030220079, 'learning_rate': 0.8985662536114613, 'epoch': 0.23}
+ 121/520 [07:44<24:53, 3.74s/it] {'loss': 1.7325, 'grad_norm': 0.0007101068977574629, 'learning_rate': 0.8966766701456176, 'epoch': 0.23}
+ 122/520 [07:48<25:04, 3.78s/it] {'loss': 1.6063, 'grad_norm': 0.0006889470214242715, 'learning_rate': 0.8947716741501177, 'epoch': 0.23}
+ 123/520 [07:51<24:44, 3.74s/it] {'loss': 2.0905, 'grad_norm': 0.0007949458354375838, 'learning_rate': 0.8928513396419369, 'epoch': 0.24}
+ 124/520 [07:55<24:36, 3.73s/it] {'loss': 1.7214, 'grad_norm': 0.0007641136305111364, 'learning_rate': 0.890915741234015, 'epoch': 0.24}
+ 125/520 [07:59<24:33, 3.73s/it] {'loss': 1.6675, 'grad_norm': 0.0007473979032939177, 'learning_rate': 0.8889649541323574, 'epoch': 0.24}
+ 126/520 [08:03<25:37, 3.90s/it] {'loss': 1.9094, 'grad_norm': 0.0006945906139804892, 'learning_rate': 0.8869990541331138, 'epoch': 0.24}
+ 127/520 [08:07<25:10, 3.84s/it] {'loss': 1.6714, 'grad_norm': 0.0007536149855857944, 'learning_rate': 0.8850181176196315, 'epoch': 0.24}
+ 128/520 [08:11<24:42, 3.78s/it] {'loss': 1.7251, 'grad_norm': 0.0006780700006930326, 'learning_rate': 0.883022221559489, 'epoch': 0.25}
+ 129/520 [08:14<24:28, 3.76s/it] {'loss': 1.6126, 'grad_norm': 0.0006310084135179253, 'learning_rate': 0.8810114435015054, 'epoch': 0.25}
+ 130/520 [08:18<24:14, 3.73s/it] {'loss': 1.7087, 'grad_norm': 0.0006238994256274668, 'learning_rate': 0.8789858615727265, 'epoch': 0.25}
+ 131/520 [08:22<24:10, 3.73s/it] {'loss': 1.93, 'grad_norm': 0.0007812468627822798, 'learning_rate': 0.8769455544753899, 'epoch': 0.25}
+ 132/520 [08:25<24:10, 3.74s/it] {'loss': 1.7565, 'grad_norm': 0.000811709205752919, 'learning_rate': 0.8748906014838671, 'epoch': 0.25}
+ 133/520 [08:29<24:20, 3.77s/it] {'loss': 1.6701, 'grad_norm': 0.0007434181597368582, 'learning_rate': 0.8728210824415827, 'epoch': 0.26}
+ 134/520 [08:33<24:17, 3.78s/it] {'loss': 1.7521, 'grad_norm': 0.0006734228807199924, 'learning_rate': 0.8707370777579133, 'epoch': 0.26}
+ 135/520 [08:37<24:24, 3.80s/it] {'loss': 1.8331, 'grad_norm': 0.0006771256780792236, 'learning_rate': 0.868638668405062, 'epoch': 0.26}
+ 136/520 [08:41<24:27, 3.82s/it] {'loss': 1.7288, 'grad_norm': 0.0007104901538164143, 'learning_rate': 0.8665259359149131, 'epoch': 0.26}
+ 137/520 [08:45<24:22, 3.82s/it] {'loss': 1.6908, 'grad_norm': 0.0008342640681130229, 'learning_rate': 0.8643989623758642, 'epoch': 0.26}
+ 138/520 [08:49<24:36, 3.87s/it] {'loss': 1.653, 'grad_norm': 0.0006851249545509506, 'learning_rate': 0.8622578304296363, 'epoch': 0.27}
+ 139/520 [08:53<24:57, 3.93s/it] {'loss': 1.8154, 'grad_norm': 0.0008436953541639334, 'learning_rate': 0.8601026232680633, 'epoch': 0.27}
+ 140/520 [08:57<24:49, 3.92s/it] {'loss': 1.968, 'grad_norm': 0.0008264853060054213, 'learning_rate': 0.8579334246298592, 'epoch': 0.27}
+ 141/520 [09:00<24:38, 3.90s/it] {'loss': 1.7692, 'grad_norm': 0.0006495255744806334, 'learning_rate': 0.8557503187973651, 'epoch': 0.27}
+ 142/520 [09:04<24:30, 3.89s/it] {'loss': 1.9817, 'grad_norm': 0.0009829585504852075, 'learning_rate': 0.8535533905932737, 'epoch': 0.27}
+ 143/520 [09:08<24:22, 3.88s/it] {'loss': 1.7157, 'grad_norm': 0.0006880594094447601, 'learning_rate': 0.8513427253773346, 'epoch': 0.28}
+ 144/520 [09:12<24:15, 3.87s/it] {'loss': 1.6212, 'grad_norm': 0.0008094934608535968, 'learning_rate': 0.8491184090430364, 'epoch': 0.28}
+ 145/520 [09:16<24:15, 3.88s/it] {'loss': 1.5631, 'grad_norm': 0.0007185685772264106, 'learning_rate': 0.8468805280142708, 'epoch': 0.28}
+ 146/520 [09:20<24:09, 3.88s/it] {'loss': 2.0401, 'grad_norm': 0.0011349175033242556, 'learning_rate': 0.8446291692419735, 'epoch': 0.28}
+ 147/520 [09:24<24:05, 3.87s/it] {'loss': 1.5913, 'grad_norm': 0.000703095126165584, 'learning_rate': 0.8423644202007468, 'epoch': 0.28}
+ 148/520 [09:28<24:04, 3.88s/it] {'loss': 1.6541, 'grad_norm': 0.0006386560720393239, 'learning_rate': 0.8400863688854596, 'epoch': 0.28}
+ 149/520 [09:31<23:57, 3.88s/it] {'loss': 1.6129, 'grad_norm': 0.000712119178374959, 'learning_rate': 0.8377951038078302, 'epoch': 0.29}
+ 150/520 [09:35<23:51, 3.87s/it] {'loss': 1.8263, 'grad_norm': 0.0008556439866740783, 'learning_rate': 0.835490713992985, 'epoch': 0.29}
+ 151/520 [09:39<23:44, 3.86s/it] {'loss': 1.6199, 'grad_norm': 0.0007330706168619422, 'learning_rate': 0.833173288976002, 'epoch': 0.29}
+ 152/520 [09:43<23:40, 3.86s/it] {'loss': 1.5917, 'grad_norm': 0.0008084122830514964, 'learning_rate': 0.8308429187984298, 'epoch': 0.29}
+ 153/520 [09:47<23:37, 3.86s/it] {'loss': 1.6313, 'grad_norm': 0.0007660673133703868, 'learning_rate': 0.8284996940047903, 'epoch': 0.29}
+ 154/520 [09:51<23:37, 3.87s/it] {'loss': 1.7278, 'grad_norm': 0.0007959055219228019, 'learning_rate': 0.8261437056390606, 'epoch': 0.3}
+ 155/520 [09:55<23:29, 3.86s/it] {'loss': 1.5968, 'grad_norm': 0.0006575410699321087, 'learning_rate': 0.8237750452411352, 'epoch': 0.3}
+ 156/520 [09:58<23:31, 3.88s/it] {'loss': 1.6852, 'grad_norm': 0.0008034257086176006, 'learning_rate': 0.8213938048432696, 'epoch': 0.3}
+ 157/520 [10:02<23:26, 3.87s/it] {'loss': 2.0615, 'grad_norm': 0.0010994960944644958, 'learning_rate': 0.8190000769665043, 'epoch': 0.3}
+ 158/520 [10:06<23:21, 3.87s/it] {'loss': 1.6237, 'grad_norm': 0.0007682358905040899, 'learning_rate': 0.81659395461707, 'epoch': 0.3}
+ 159/520 [10:10<23:16, 3.87s/it] {'loss': 1.6539, 'grad_norm': 0.0006268041362454995, 'learning_rate': 0.8141755312827736, 'epoch': 0.31}
+ 160/520 [10:14<23:10, 3.86s/it] {'loss': 1.6854, 'grad_norm': 0.0009021535504720769, 'learning_rate': 0.8117449009293668, 'epoch': 0.31}
+ 161/520 [10:18<23:03, 3.85s/it] {'loss': 1.7107, 'grad_norm': 0.0007089937952637601, 'learning_rate': 0.8093021579968941, 'epoch': 0.31}
+ 162/520 [10:22<23:06, 3.87s/it] {'loss': 1.9447, 'grad_norm': 0.0009998698345843188, 'learning_rate': 0.8068473973960237, 'epoch': 0.31}
+ 163/520 [10:26<23:03, 3.87s/it] {'loss': 1.5703, 'grad_norm': 0.0009495688915267075, 'learning_rate': 0.8043807145043603, 'epoch': 0.31}
+ 164/520 [10:29<23:00, 3.88s/it] {'loss': 1.5034, 'grad_norm': 0.0008885641663824505, 'learning_rate': 0.8019022051627387, 'epoch': 0.32}
+ 165/520 [10:33<22:34, 3.82s/it] {'loss': 1.6389, 'grad_norm': 0.0006322119938455609, 'learning_rate': 0.7994119656715002, 'epoch': 0.32}
+ 166/520 [10:37<22:17, 3.78s/it] {'loss': 1.6526, 'grad_norm': 0.0008112434871649934, 'learning_rate': 0.7969100927867507, 'epoch': 0.32}
+ 167/520 [10:40<22:02, 3.75s/it] {'loss': 1.6348, 'grad_norm': 0.0006902758041305765, 'learning_rate': 0.7943966837166023, 'epoch': 0.32}
+ 168/520 [10:44<21:43, 3.70s/it] {'loss': 1.5888, 'grad_norm': 0.0008477731562899423, 'learning_rate': 0.791871836117395, 'epoch': 0.32}
+ 169/520 [10:48<21:45, 3.72s/it] {'loss': 1.6439, 'grad_norm': 0.0006934816885119059, 'learning_rate': 0.789335648089903, 'epoch': 0.33}
+ 170/520 [10:52<21:46, 3.73s/it] {'loss': 1.8368, 'grad_norm': 0.0006974070254675314, 'learning_rate': 0.786788218175523, 'epoch': 0.33}
+ 171/520 [10:55<21:37, 3.72s/it] {'loss': 1.5768, 'grad_norm': 0.0007219784166968933, 'learning_rate': 0.7842296453524462, 'epoch': 0.33}
+ 172/520 [10:59<21:29, 3.70s/it] {'loss': 1.6617, 'grad_norm': 0.0006603818874632118, 'learning_rate': 0.781660029031811, 'epoch': 0.33}
+ 173/520 [11:03<21:19, 3.69s/it] {'loss': 1.5797, 'grad_norm': 0.0005812751347450905, 'learning_rate': 0.7790794690538421, 'epoch': 0.33}
+ 174/520 [11:06<21:17, 3.69s/it] {'loss': 1.693, 'grad_norm': 0.0008652631390094385, 'learning_rate': 0.7764880656839697, 'epoch': 0.33}
+ 175/520 [11:10<21:26, 3.73s/it] {'loss': 1.5551, 'grad_norm': 0.0007771827984310509, 'learning_rate': 0.7738859196089357, 'epoch': 0.34}
+ 176/520 [11:14<21:37, 3.77s/it] {'loss': 1.952, 'grad_norm': 0.0011580606907926616, 'learning_rate': 0.7712731319328797, 'epoch': 0.34}
+ 177/520 [11:18<21:46, 3.81s/it] {'loss': 1.8079, 'grad_norm': 0.0007584001238223043, 'learning_rate': 0.768649804173412, 'epoch': 0.34}
+ 178/520 [11:22<21:47, 3.82s/it] {'loss': 1.6328, 'grad_norm': 0.0008803765785454862, 'learning_rate': 0.7660160382576683, 'epoch': 0.34}
+ 179/520 [11:26<21:50, 3.84s/it] {'loss': 1.7518, 'grad_norm': 0.0006600580586110022, 'learning_rate': 0.7633719365183503, 'epoch': 0.34}
+ 180/520 [11:29<21:53, 3.86s/it] {'loss': 1.6263, 'grad_norm': 0.0006445721657295738, 'learning_rate': 0.760717601689749, 'epoch': 0.35}
+ 181/520 [11:33<21:49, 3.86s/it] {'loss': 1.5999, 'grad_norm': 0.0007985640084926802, 'learning_rate': 0.7580531369037533, 'epoch': 0.35}
+ 182/520 [11:37<21:52, 3.88s/it] {'loss': 1.6543, 'grad_norm': 0.0006254700470094563, 'learning_rate': 0.7553786456858429, 'epoch': 0.35}
+ 183/520 [11:41<21:53, 3.90s/it] {'loss': 1.6463, 'grad_norm': 0.0006898189133134183, 'learning_rate': 0.7526942319510654, 'epoch': 0.35}
+ 184/520 [11:45<21:50, 3.90s/it] {'loss': 1.5606, 'grad_norm': 0.0007516829209312363, 'learning_rate': 0.75, 'epoch': 0.35}
+ 185/520 [11:49<21:46, 3.90s/it] {'loss': 1.7612, 'grad_norm': 0.0006743431162804923, 'learning_rate': 0.7472960545147037, 'epoch': 0.36}
+ 186/520 [11:53<21:38, 3.89s/it] {'loss': 1.5731, 'grad_norm': 0.0005402946430300814, 'learning_rate': 0.7445825005546447, 'epoch': 0.36}
+ 187/520 [11:57<21:35, 3.89s/it] {'loss': 1.5971, 'grad_norm': 0.0006361629777448292, 'learning_rate': 0.7418594435526199, 'epoch': 0.36}
+ 188/520 [12:01<21:29, 3.88s/it] {'loss': 1.6648, 'grad_norm': 0.0006716183946896159, 'learning_rate': 0.7391269893106591, 'epoch': 0.36}
+ 189/520 [12:05<21:21, 3.87s/it] {'loss': 1.706, 'grad_norm': 0.0007389419205673024, 'learning_rate': 0.7363852439959135, 'epoch': 0.36}
+ 190/520 [12:08<21:16, 3.87s/it] {'loss': 1.5891, 'grad_norm': 0.0006438514814415237, 'learning_rate': 0.733634314136531, 'epoch': 0.37}
+ 191/520 [12:12<21:14, 3.87s/it] {'loss': 1.5403, 'grad_norm': 0.0005836381562926119, 'learning_rate': 0.7308743066175171, 'epoch': 0.37}
+ 192/520 [12:16<21:14, 3.89s/it] {'loss': 1.6528, 'grad_norm': 0.0005844302693590086, 'learning_rate': 0.7281053286765815, 'epoch': 0.37}
+ 193/520 [12:20<20:58, 3.85s/it] {'loss': 1.9359, 'grad_norm': 0.0007428370767115947, 'learning_rate': 0.7253274878999727, 'epoch': 0.37}
+ 194/520 [12:24<20:42, 3.81s/it] {'loss': 1.756, 'grad_norm': 0.0007160129479807779, 'learning_rate': 0.7225408922182961, 'epoch': 0.37}
+ 195/520 [12:27<20:28, 3.78s/it] {'loss': 1.6531, 'grad_norm': 0.0006815862936060497, 'learning_rate': 0.7197456499023225, 'epoch': 0.38}
+ 196/520 [12:31<20:25, 3.78s/it] {'loss': 1.6393, 'grad_norm': 0.0006037649205151906, 'learning_rate': 0.716941869558779, 'epoch': 0.38}
+ 197/520 [12:35<20:15, 3.76s/it] {'loss': 1.5716, 'grad_norm': 0.0006566399956170873, 'learning_rate': 0.7141296601261313, 'epoch': 0.38}
+ 198/520 [12:39<20:11, 3.76s/it] {'loss': 1.715, 'grad_norm': 0.0006656436657470048, 'learning_rate': 0.7113091308703497, 'epoch': 0.38}
+ 199/520 [12:42<20:01, 3.74s/it] {'loss': 1.5773, 'grad_norm': 0.0005953858696105602, 'learning_rate': 0.7084803913806641, 'epoch': 0.38}
+ 200/520 [12:46<19:56, 3.74s/it] {'loss': 1.8152, 'grad_norm': 0.0008210709331108, 'learning_rate': 0.7056435515653059, 'epoch': 0.38}
+ 201/520 [12:50<19:44, 3.71s/it] {'loss': 1.8042, 'grad_norm': 0.0006773198925554239, 'learning_rate': 0.7027987216472376, 'epoch': 0.39}
+ 202/520 [12:53<19:40, 3.71s/it] {'loss': 1.5814, 'grad_norm': 0.0005965581558248684, 'learning_rate': 0.6999460121598704, 'epoch': 0.39}
+ 203/520 [12:57<19:33, 3.70s/it] {'loss': 1.6293, 'grad_norm': 0.0006278251822713263, 'learning_rate': 0.6970855339427697, 'epoch': 0.39}
+ 204/520 [13:01<19:27, 3.69s/it] {'loss': 1.6942, 'grad_norm': 0.0006022442389824958, 'learning_rate': 0.6942173981373474, 'epoch': 0.39}
+ 205/520 [13:04<19:27, 3.70s/it] {'loss': 1.8429, 'grad_norm': 0.0007833507274326058, 'learning_rate': 0.6913417161825449, 'epoch': 0.39}
+ 206/520 [13:08<19:18, 3.69s/it] {'loss': 1.717, 'grad_norm': 0.0005620445801182956, 'learning_rate': 0.6884585998105026, 'epoch': 0.4}
+ 207/520 [13:12<19:16, 3.69s/it] {'loss': 1.8195, 'grad_norm': 0.0008795275757122977, 'learning_rate': 0.685568161042219, 'epoch': 0.4}
+ 208/520 [13:16<19:16, 3.71s/it] {'loss': 1.6463, 'grad_norm': 0.000700551285181131, 'learning_rate': 0.6826705121831976, 'epoch': 0.4}
+ 209/520 [13:19<19:14, 3.71s/it] {'loss': 1.6015, 'grad_norm': 0.0006355805033362378, 'learning_rate': 0.6797657658190838, 'epoch': 0.4}
+ 210/520 [13:23<19:05, 3.70s/it] {'loss': 1.6446, 'grad_norm': 0.0006633144737232493, 'learning_rate': 0.6768540348112907, 'epoch': 0.4}
+ 211/520 [13:27<19:03, 3.70s/it] {'loss': 1.661, 'grad_norm': 0.00069348773377308, 'learning_rate': 0.6739354322926135, 'epoch': 0.41}
+ 212/520 [13:30<18:55, 3.69s/it] {'loss': 1.6484, 'grad_norm': 0.0006182339932988149, 'learning_rate': 0.6710100716628344, 'epoch': 0.41}
+ 213/520 [13:34<18:50, 3.68s/it] {'loss': 1.6157, 'grad_norm': 0.000696597690937503, 'learning_rate': 0.6680780665843155, 'epoch': 0.41}
+ 214/520 [13:38<18:51, 3.70s/it] {'loss': 1.6585, 'grad_norm': 0.0006353991727982782, 'learning_rate': 0.6651395309775836, 'epoch': 0.41}
+ 215/520 [13:41<18:45, 3.69s/it] {'loss': 1.7349, 'grad_norm': 0.0006465800391227429, 'learning_rate': 0.6621945790169036, 'epoch': 0.41}
+ 216/520 [13:45<18:43, 3.70s/it] {'loss': 1.4871, 'grad_norm': 0.000646325868186846, 'learning_rate': 0.6592433251258423, 'epoch': 0.42}
+ 217/520 [13:49<18:39, 3.70s/it] {'loss': 1.6398, 'grad_norm': 0.0005882863480868208, 'learning_rate': 0.6562858839728223, 'epoch': 0.42}
+ 218/520 [13:53<18:35, 3.69s/it] {'loss': 1.6714, 'grad_norm': 0.0006025125756023148, 'learning_rate': 0.6533223704666672, 'epoch': 0.42}
+ 219/520 [13:56<18:34, 3.70s/it] {'loss': 1.588, 'grad_norm': 0.000539679752294778, 'learning_rate': 0.6503528997521365, 'epoch': 0.42}
+ 220/520 [14:00<18:40, 3.74s/it] {'loss': 1.7716, 'grad_norm': 0.0010012019782743922, 'learning_rate': 0.6473775872054521, 'epoch': 0.42}
+ 221/520 [14:04<18:48, 3.77s/it] {'loss': 1.6425, 'grad_norm': 0.000572355167230654, 'learning_rate': 0.644396548429815, 'epoch': 0.42}
+ 222/520 [14:08<18:50, 3.79s/it] {'loss': 1.5087, 'grad_norm': 0.0005591707754140685, 'learning_rate': 0.6414098992509137, 'epoch': 0.43}
+ 223/520 [14:12<18:50, 3.81s/it] {'loss': 1.5191, 'grad_norm': 0.00050940008090395, 'learning_rate': 0.6384177557124247, 'epoch': 0.43}
+ 224/520 [14:15<18:43, 3.79s/it] {'loss': 2.2255, 'grad_norm': 0.0009811361219348958, 'learning_rate': 0.6354202340715026, 'epoch': 0.43}
+ 225/520 [14:19<18:31, 3.77s/it] {'loss': 1.546, 'grad_norm': 0.0006012314455481493, 'learning_rate': 0.6324174507942636, 'epoch': 0.43}
+ 226/520 [14:23<18:19, 3.74s/it] {'loss': 1.6653, 'grad_norm': 0.0006027843721691774, 'learning_rate': 0.6294095225512604, 'epoch': 0.43}
+ 227/520 [14:26<18:13, 3.73s/it] {'loss': 1.6383, 'grad_norm': 0.0006102399524313252, 'learning_rate': 0.6263965662129487, 'epoch': 0.44}
+ 228/520 [14:30<18:05, 3.72s/it] {'loss': 1.9583, 'grad_norm': 0.0005733784900919949, 'learning_rate': 0.6233786988451467, 'epoch': 0.44}
+ 229/520 [14:34<17:58, 3.71s/it] {'loss': 1.6429, 'grad_norm': 0.0006312486739535178, 'learning_rate': 0.6203560377044866, 'epoch': 0.44}
+ 230/520 [14:38<17:56, 3.71s/it] {'loss': 1.4813, 'grad_norm': 0.0005706996309198442, 'learning_rate': 0.6173287002338577, 'epoch': 0.44}
+ 231/520 [14:41<17:51, 3.71s/it] {'loss': 1.5638, 'grad_norm': 0.0005278225769774252, 'learning_rate': 0.6142968040578448, 'epoch': 0.44}
+ 232/520 [14:45<17:45, 3.70s/it] {'loss': 2.0162, 'grad_norm': 0.0007068480823574227, 'learning_rate': 0.6112604669781572, 'epoch': 0.45}
+ 233/520 [14:49<17:41, 3.70s/it] {'loss': 1.8396, 'grad_norm': 0.0006437928237915074, 'learning_rate': 0.6082198069690514, 'epoch': 0.45}
+ 234/520 [14:52<17:38, 3.70s/it] {'loss': 1.5342, 'grad_norm': 0.0005474459757772173, 'learning_rate': 0.6051749421727479, 'epoch': 0.45}
+ 235/520 [14:56<17:35, 3.70s/it] {'loss': 1.5807, 'grad_norm': 0.0008067983031319058, 'learning_rate': 0.6021259908948402, 'epoch': 0.45}
+ 236/520 [15:00<17:30, 3.70s/it] {'loss': 1.7263, 'grad_norm': 0.0005189000594870148, 'learning_rate': 0.5990730715996988, 'epoch': 0.45}
+ 237/520 [15:03<17:25, 3.69s/it] {'loss': 1.6487, 'grad_norm': 0.000507387576759161, 'learning_rate': 0.5960163029058682, 'epoch': 0.46}
+ 238/520 [15:07<17:21, 3.69s/it] {'loss': 1.5864, 'grad_norm': 0.0005502184918960017, 'learning_rate': 0.5929558035814574, 'epoch': 0.46}
+ 239/520 [15:11<17:19, 3.70s/it] {'loss': 1.7318, 'grad_norm': 0.0005863278592848085, 'learning_rate': 0.5898916925395263, 'epoch': 0.46}
+ 240/520 [15:14<17:13, 3.69s/it] {'loss': 1.4192, 'grad_norm': 0.0005315925717040915, 'learning_rate': 0.5868240888334653, 'epoch': 0.46}
+ 241/520 [15:18<17:14, 3.71s/it] {'loss': 1.5177, 'grad_norm': 0.0005968756924233629, 'learning_rate': 0.5837531116523682, 'epoch': 0.46}
+ 242/520 [15:22<17:09, 3.70s/it] {'loss': 1.5615, 'grad_norm': 0.0005874436553035279, 'learning_rate': 0.5806788803164034, 'epoch': 0.47}
+ 243/520 [15:26<17:08, 3.71s/it] {'loss': 1.5432, 'grad_norm': 0.0005914169063089228, 'learning_rate': 0.5776015142721758, 'epoch': 0.47}
+ 244/520 [15:29<17:03, 3.71s/it] {'loss': 1.7212, 'grad_norm': 0.0007040757374288113, 'learning_rate': 0.5745211330880872, 'epoch': 0.47}
+ 245/520 [15:33<16:58, 3.71s/it] {'loss': 1.5539, 'grad_norm': 0.0008827784366374681, 'learning_rate': 0.57143785644969, 'epoch': 0.47}
+ 246/520 [15:37<17:03, 3.74s/it] {'loss': 1.9778, 'grad_norm': 0.0006151844248834158, 'learning_rate': 0.5683518041550367, 'epoch': 0.47}
+ 247/520 [15:41<17:00, 3.74s/it] {'loss': 1.7384, 'grad_norm': 0.0005581197415534711, 'learning_rate': 0.5652630961100259, 'epoch': 0.47}
+ 248/520 [15:44<16:56, 3.74s/it] {'loss': 1.5319, 'grad_norm': 0.0005336480444651815, 'learning_rate': 0.5621718523237427, 'epoch': 0.48}
+ 249/520 [15:48<16:53, 3.74s/it] {'loss': 1.6597, 'grad_norm': 0.0005393381520713867, 'learning_rate': 0.5590781929037965, 'epoch': 0.48}
+ 250/520 [15:52<16:54, 3.76s/it] {'loss': 1.613, 'grad_norm': 0.0005684699035058699, 'learning_rate': 0.5559822380516539, 'epoch': 0.48}
+ 251/520 [15:56<16:53, 3.77s/it] {'loss': 1.6643, 'grad_norm': 0.0004588307742213756, 'learning_rate': 0.552884108057969, 'epoch': 0.48}
+ 252/520 [15:59<16:48, 3.76s/it] {'loss': 1.82, 'grad_norm': 0.0005216497935278349, 'learning_rate': 0.5497839232979084, 'epoch': 0.48}
+ 253/520 [16:03<16:49, 3.78s/it] {'loss': 1.6768, 'grad_norm': 0.0005886250088102381, 'learning_rate': 0.5466818042264753, 'epoch': 0.49}
+ 254/520 [16:07<16:52, 3.81s/it] {'loss': 1.5478, 'grad_norm': 0.0004978748816816967, 'learning_rate': 0.5435778713738292, 'epoch': 0.49}
+ 255/520 [16:11<16:55, 3.83s/it] {'loss': 1.5899, 'grad_norm': 0.0005446770131284653, 'learning_rate': 0.5404722453406017, 'epoch': 0.49}
+ 256/520 [16:15<16:55, 3.85s/it] {'loss': 1.6308, 'grad_norm': 0.0005585498573877629, 'learning_rate': 0.5373650467932122, 'epoch': 0.49}
+ 257/520 [16:19<16:55, 3.86s/it] {'loss': 1.6574, 'grad_norm': 0.0005368134738416144, 'learning_rate': 0.5342563964591783, 'epoch': 0.49}
+ 258/520 [16:23<16:48, 3.85s/it] {'loss': 1.6536, 'grad_norm': 0.00044646670649432443, 'learning_rate': 0.5311464151224261, 'epoch': 0.5}
+ 259/520 [16:26<16:45, 3.85s/it] {'loss': 1.7213, 'grad_norm': 0.000557389986813053, 'learning_rate': 0.5280352236185959, 'epoch': 0.5}
+ 260/520 [16:30<16:41, 3.85s/it] {'loss': 1.9389, 'grad_norm': 0.0005820812034490389, 'learning_rate': 0.5249229428303486, 'epoch': 0.5}
+ 261/520 [16:34<16:39, 3.86s/it] {'loss': 1.8526, 'grad_norm': 0.0006222043196831347, 'learning_rate': 0.521809693682668, 'epoch': 0.5}
+ 262/520 [16:38<16:36, 3.86s/it] {'loss': 1.5609, 'grad_norm': 0.0005170341302877493, 'learning_rate': 0.5186955971381629, 'epoch': 0.5}
+ 263/520 [16:42<16:17, 3.80s/it] {'loss': 1.8843, 'grad_norm': 0.0005654669464989388, 'learning_rate': 0.5155807741923666, 'epoch': 0.51}
+ 264/520 [16:45<16:08, 3.78s/it] {'loss': 1.6946, 'grad_norm': 0.0004976554394040484, 'learning_rate': 0.5124653458690365, 'epoch': 0.51}
+ 265/520 [16:49<15:59, 3.76s/it] {'loss': 1.5773, 'grad_norm': 0.000625505285918962, 'learning_rate': 0.5093494332154511, 'epoch': 0.51}
+ 266/520 [16:53<15:53, 3.75s/it] {'loss': 1.3953, 'grad_norm': 0.0005053422368439256, 'learning_rate': 0.5062331572977076, 'epoch': 0.51}
+ 267/520 [16:57<15:45, 3.74s/it] {'loss': 1.5698, 'grad_norm': 0.0005593987359497685, 'learning_rate': 0.5031166391960168, 'epoch': 0.51}
+ 268/520 [17:00<15:38, 3.72s/it] {'loss': 2.0306, 'grad_norm': 0.0006335823458852843, 'learning_rate': 0.5, 'epoch': 0.52}
+ 269/520 [17:04<15:30, 3.71s/it] {'loss': 1.6783, 'grad_norm': 0.000527784925113627, 'learning_rate': 0.4968833608039832, 'epoch': 0.52}
+ 270/520 [17:08<15:29, 3.72s/it] {'loss': 1.751, 'grad_norm': 0.0006844395137803208, 'learning_rate': 0.4937668427022925, 'epoch': 0.52}
+ 271/520 [17:11<15:22, 3.71s/it] {'loss': 1.683, 'grad_norm': 0.0006070559307628261, 'learning_rate': 0.490650566784549, 'epoch': 0.52}
+ 272/520 [17:15<15:23, 3.72s/it] {'loss': 1.8034, 'grad_norm': 0.000561409073558856, 'learning_rate': 0.48753465413096364, 'epoch': 0.52}
+ 273/520 [17:19<15:15, 3.71s/it] {'loss': 1.8767, 'grad_norm': 0.0005596819325702742, 'learning_rate': 0.4844192258076335, 'epoch': 0.53}
+ 274/520 [17:22<15:10, 3.70s/it] {'loss': 1.623, 'grad_norm': 0.0006349867129216933, 'learning_rate': 0.48130440286183723, 'epoch': 0.53}
+ 275/520 [17:26<15:05, 3.69s/it] {'loss': 1.538, 'grad_norm': 0.0005076530425306627, 'learning_rate': 0.47819030631733206, 'epoch': 0.53}
+ 276/520 [17:30<15:02, 3.70s/it] {'loss': 1.6517, 'grad_norm': 0.000526938798806443, 'learning_rate': 0.4750770571696514, 'epoch': 0.53}
+ 277/520 [17:34<14:56, 3.69s/it] {'loss': 1.8947, 'grad_norm': 0.0005211933532610699, 'learning_rate': 0.47196477638140405, 'epoch': 0.53}
+ 278/520 [17:37<14:53, 3.69s/it] {'loss': 1.4972, 'grad_norm': 0.0005051656025643484, 'learning_rate': 0.46885358487757395, 'epoch': 0.53}
+ 279/520 [17:41<14:46, 3.68s/it] {'loss': 1.8264, 'grad_norm': 0.000654185222357088, 'learning_rate': 0.46574360354082167, 'epoch': 0.54}
+ 280/520 [17:45<14:42, 3.68s/it] {'loss': 1.5541, 'grad_norm': 0.0005984952129144934, 'learning_rate': 0.4626349532067879, 'epoch': 0.54}
+ 281/520 [17:48<14:39, 3.68s/it] {'loss': 1.6961, 'grad_norm': 0.0005589990561339553, 'learning_rate': 0.4595277546593983, 'epoch': 0.54}
+ 282/520 [17:52<14:35, 3.68s/it] {'loss': 1.5107, 'grad_norm': 0.00047583372008484456, 'learning_rate': 0.4564221286261709, 'epoch': 0.54}
+ 283/520 [17:56<14:34, 3.69s/it] {'loss': 1.7523, 'grad_norm': 0.0005434021109927867, 'learning_rate': 0.4533181957735247, 'epoch': 0.54}
+ 284/520 [17:59<14:31, 3.69s/it] {'loss': 1.7842, 'grad_norm': 0.000585848479008622, 'learning_rate': 0.45021607670209174, 'epoch': 0.55}
+ 285/520 [18:03<14:25, 3.68s/it] {'loss': 1.5718, 'grad_norm': 0.0005141707808637697, 'learning_rate': 0.44711589194203116, 'epoch': 0.55}
+ 286/520 [18:07<14:23, 3.69s/it] {'loss': 1.4122, 'grad_norm': 0.000571757756283708, 'learning_rate': 0.4440177619483461, 'epoch': 0.55}
+ 287/520 [18:10<14:18, 3.68s/it] {'loss': 1.6749, 'grad_norm': 0.0005048892025309767, 'learning_rate': 0.4409218070962036, 'epoch': 0.55}
+ 288/520 [18:14<14:17, 3.70s/it] {'loss': 1.7739, 'grad_norm': 0.0004366924053093913, 'learning_rate': 0.43782814767625755, 'epoch': 0.55}
+ 289/520 [18:18<14:12, 3.69s/it] {'loss': 1.5789, 'grad_norm': 0.0005428264499735137, 'learning_rate': 0.4347369038899743, 'epoch': 0.56}
+ 290/520 [18:21<14:09, 3.69s/it] {'loss': 1.5039, 'grad_norm': 0.0004519500180515928, 'learning_rate': 0.4316481958449634, 'epoch': 0.56}
+ 291/520 [18:25<14:05, 3.69s/it] {'loss': 1.531, 'grad_norm': 0.0005194264693947645, 'learning_rate': 0.4285621435503101, 'epoch': 0.56}
+ 292/520 [18:29<14:06, 3.71s/it] {'loss': 1.6199, 'grad_norm': 0.0006148387677862515, 'learning_rate': 0.4254788669119127, 'epoch': 0.56}
+ 293/520 [18:33<14:01, 3.71s/it] {'loss': 1.524, 'grad_norm': 0.0005240471745423458, 'learning_rate': 0.4223984857278242, 'epoch': 0.56}
+ 294/520 [18:36<13:58, 3.71s/it] {'loss': 1.5661, 'grad_norm': 0.0005557562464307119, 'learning_rate': 0.41932111968359664, 'epoch': 0.57}
+ 295/520 [18:40<13:50, 3.69s/it] {'loss': 1.8833, 'grad_norm': 0.0005368411375967342, 'learning_rate': 0.41624688834763185, 'epoch': 0.57}
+ 296/520 [18:44<13:46, 3.69s/it] {'loss': 1.5241, 'grad_norm': 0.0006302741932543072, 'learning_rate': 0.41317591116653485, 'epoch': 0.57}
+ 297/520 [18:47<13:46, 3.71s/it] {'loss': 1.7042, 'grad_norm': 0.0005406334328976758, 'learning_rate': 0.41010830746047366, 'epoch': 0.57}
+ 298/520 [18:51<13:42, 3.70s/it] {'loss': 1.6371, 'grad_norm': 0.0004199447338645637, 'learning_rate': 0.4070441964185427, 'epoch': 0.57}
+ 299/520 [18:55<13:40, 3.71s/it] {'loss': 1.8791, 'grad_norm': 0.0005127615853328542, 'learning_rate': 0.40398369709413195, 'epoch': 0.57}
+ 300/520 [18:59<13:38, 3.72s/it] {'loss': 1.7112, 'grad_norm': 0.0005437121603336848, 'learning_rate': 0.4009269284003013, 'epoch': 0.58}
+ 301/520 [19:02<13:32, 3.71s/it] {'loss': 1.6768, 'grad_norm': 0.0004647902156466226, 'learning_rate': 0.3978740091051599, 'epoch': 0.58}
+ 302/520 [19:06<13:37, 3.75s/it] {'loss': 1.8774, 'grad_norm': 0.0005125665059965884, 'learning_rate': 0.3948250578272522, 'epoch': 0.58}
+ 303/520 [19:10<13:30, 3.74s/it] {'loss': 1.5704, 'grad_norm': 0.0005363507433985933, 'learning_rate': 0.3917801930309486, 'epoch': 0.58}
+ 304/520 [19:14<13:26, 3.73s/it] {'loss': 1.7557, 'grad_norm': 0.0005580964252813932, 'learning_rate': 0.38873953302184283, 'epoch': 0.58}
+ 305/520 [19:17<13:20, 3.72s/it] {'loss': 1.7723, 'grad_norm': 0.000585136665153674, 'learning_rate': 0.3857031959421553, 'epoch': 0.59}
+ 306/520 [19:21<13:16, 3.72s/it] {'loss': 1.6439, 'grad_norm': 0.0005112347957132431, 'learning_rate': 0.3826712997661425, 'epoch': 0.59}
+ 307/520 [19:25<13:10, 3.71s/it] {'loss': 1.5821, 'grad_norm': 0.0005552909958189247, 'learning_rate': 0.3796439622955136, 'epoch': 0.59}
+ 308/520 [19:28<13:07, 3.72s/it] {'loss': 1.7166, 'grad_norm': 0.00045375844660296464, 'learning_rate': 0.37662130115485315, 'epoch': 0.59}
+ 309/520 [19:32<13:21, 3.80s/it] {'loss': 1.5731, 'grad_norm': 0.0004508595766779577, 'learning_rate': 0.3736034337870512, 'epoch': 0.59}
+ 310/520 [19:36<13:12, 3.77s/it] {'loss': 1.5396, 'grad_norm': 0.0004922462766418493, 'learning_rate': 0.3705904774487396, 'epoch': 0.6}
+ 311/520 [19:40<13:04, 3.75s/it] {'loss': 1.538, 'grad_norm': 0.0005248213390239686, 'learning_rate': 0.36758254920573635, 'epoch': 0.6}
+ 312/520 [19:44<13:28, 3.89s/it] {'loss': 1.4992, 'grad_norm': 0.000609894654534041, 'learning_rate': 0.3645797659284975, 'epoch': 0.6}
+ 313/520 [19:48<13:18, 3.86s/it] {'loss': 1.4602, 'grad_norm': 0.0004608351123551656, 'learning_rate': 0.36158224428757535, 'epoch': 0.6}
+ 314/520 [19:52<13:24, 3.91s/it] {'loss': 1.5097, 'grad_norm': 0.0004532170393296556, 'learning_rate': 0.35859010074908626, 'epoch': 0.6}
+ 315/520 [19:55<13:06, 3.84s/it] {'loss': 1.918, 'grad_norm': 0.000550394399572973, 'learning_rate': 0.35560345157018514, 'epoch': 0.61}
+ 316/520 [20:00<13:25, 3.95s/it] {'loss': 1.4999, 'grad_norm': 0.0005114442894934082, 'learning_rate': 0.35262241279454787, 'epoch': 0.61}
+ 317/520 [20:03<13:06, 3.87s/it] {'loss': 1.5008, 'grad_norm': 0.00046330987207532706, 'learning_rate': 0.3496471002478635, 'epoch': 0.61}
+ 318/520 [20:07<12:56, 3.84s/it] {'loss': 1.6621, 'grad_norm': 0.0004891769412178292, 'learning_rate': 0.3466776295333329, 'epoch': 0.61}
+ 319/520 [20:11<12:59, 3.88s/it] {'loss': 1.4994, 'grad_norm': 0.00042991943834216846, 'learning_rate': 0.34371411602717783, 'epoch': 0.61}
+ 320/520 [20:15<12:43, 3.82s/it] {'loss': 1.4336, 'grad_norm': 0.0004619068832804314, 'learning_rate': 0.34075667487415784, 'epoch': 0.62}
+ 321/520 [20:19<12:39, 3.82s/it] {'loss': 1.6964, 'grad_norm': 0.0004704803029331129, 'learning_rate': 0.3378054209830965, 'epoch': 0.62}
+ 322/520 [20:22<12:37, 3.82s/it] {'loss': 1.7013, 'grad_norm': 0.0006402820778999564, 'learning_rate': 0.3348604690224166, 'epoch': 0.62}
+ 323/520 [20:26<12:34, 3.83s/it] {'loss': 1.8362, 'grad_norm': 0.0006900150044019711, 'learning_rate': 0.3319219334156847, 'epoch': 0.62}
+ 324/520 [20:30<12:31, 3.84s/it] {'loss': 1.6037, 'grad_norm': 0.00045212948073460117, 'learning_rate': 0.32898992833716567, 'epoch': 0.62}
+ 325/520 [20:34<12:28, 3.84s/it] {'loss': 1.6093, 'grad_norm': 0.0005617415161856354, 'learning_rate': 0.32606456770738634, 'epoch': 0.62}
+ 326/520 [20:38<12:26, 3.85s/it] {'loss': 1.5875, 'grad_norm': 0.0004942856280121372, 'learning_rate': 0.3231459651887093, 'epoch': 0.63}
+ 327/520 [20:42<12:21, 3.84s/it] {'loss': 1.8946, 'grad_norm': 0.0007039780895626214, 'learning_rate': 0.32023423418091623, 'epoch': 0.63}
+ 328/520 [20:46<12:17, 3.84s/it] {'loss': 1.7021, 'grad_norm': 0.000516310133442142, 'learning_rate': 0.3173294878168025, 'epoch': 0.63}
+ 329/520 [20:49<12:15, 3.85s/it] {'loss': 1.4904, 'grad_norm': 0.00041995899967269554, 'learning_rate': 0.31443183895778104, 'epoch': 0.63}
+ 330/520 [20:53<12:10, 3.84s/it] {'loss': 1.5886, 'grad_norm': 0.0004555260578749891, 'learning_rate': 0.3115414001894974, 'epoch': 0.63}
+ 331/520 [20:57<12:07, 3.85s/it] {'loss': 1.5353, 'grad_norm': 0.0004783333365424425, 'learning_rate': 0.30865828381745514, 'epoch': 0.64}
+ 332/520 [21:01<11:53, 3.80s/it] {'loss': 1.8851, 'grad_norm': 0.0005685725177832384, 'learning_rate': 0.30578260186265266, 'epoch': 0.64}
+ 333/520 [21:04<11:42, 3.76s/it] {'loss': 1.7682, 'grad_norm': 0.0005224871341107221, 'learning_rate': 0.3029144660572304, 'epoch': 0.64}
+ 334/520 [21:08<11:36, 3.74s/it] {'loss': 1.5986, 'grad_norm': 0.0004905542856587677, 'learning_rate': 0.3000539878401296, 'epoch': 0.64}
+ 335/520 [21:12<11:29, 3.72s/it] {'loss': 1.6119, 'grad_norm': 0.00048184170995784305, 'learning_rate': 0.29720127835276255, 'epoch': 0.64}
+ 336/520 [21:16<11:24, 3.72s/it] {'loss': 1.516, 'grad_norm': 0.0005379959982309423, 'learning_rate': 0.29435644843469433, 'epoch': 0.65}
+ 337/520 [21:19<11:29, 3.77s/it] {'loss': 1.4979, 'grad_norm': 0.00046439693813228974, 'learning_rate': 0.2915196086193361, 'epoch': 0.65}
+ 338/520 [21:23<11:29, 3.79s/it] {'loss': 1.6331, 'grad_norm': 0.0004967446116178785, 'learning_rate': 0.28869086912965036, 'epoch': 0.65}
+ 339/520 [21:27<11:35, 3.84s/it] {'loss': 1.562, 'grad_norm': 0.0005058550061766712, 'learning_rate': 0.28587033987386856, 'epoch': 0.65}
+ 340/520 [21:31<11:34, 3.86s/it] {'loss': 1.5263, 'grad_norm': 0.0004623454603603415, 'learning_rate': 0.28305813044122097, 'epoch': 0.65}
+ 341/520 [21:35<11:41, 3.92s/it] {'loss': 1.5633, 'grad_norm': 0.0004851858376611759, 'learning_rate': 0.28025435009767746, 'epoch': 0.66}
+ 342/520 [21:39<11:37, 3.92s/it] {'loss': 1.8755, 'grad_norm': 0.0008272856245973709, 'learning_rate': 0.2774591077817038, 'epoch': 0.66}
+ 343/520 [21:43<11:33, 3.92s/it] {'loss': 1.8276, 'grad_norm': 0.000549144555079162, 'learning_rate': 0.2746725121000273, 'epoch': 0.66}
+ 344/520 [21:47<11:29, 3.92s/it] {'loss': 1.4823, 'grad_norm': 0.0004201168766011423, 'learning_rate': 0.2718946713234185, 'epoch': 0.66}
+ 345/520 [21:51<11:24, 3.91s/it] {'loss': 1.6422, 'grad_norm': 0.000585136098798458, 'learning_rate': 0.26912569338248316, 'epoch': 0.66}
+ 346/520 [21:55<11:16, 3.89s/it] {'loss': 1.8177, 'grad_norm': 0.0005545188247127101, 'learning_rate': 0.266365685863469, 'epoch': 0.67}
+ 347/520 [21:58<11:01, 3.82s/it] {'loss': 1.5086, 'grad_norm': 0.0005201725164427804, 'learning_rate': 0.26361475600408657, 'epoch': 0.67}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 348/520 [22:02<10:49, 3.78s/it] {'loss': 1.524, 'grad_norm': 0.0005522062831295883, 'learning_rate': 0.26087301068934104, 'epoch': 0.67}
+ 349/520 [22:06<10:41, 3.75s/it] {'loss': 1.5515, 'grad_norm': 0.0005339825912766345, 'learning_rate': 0.2581405564473801, 'epoch': 0.67}
+ 350/520 [22:09<10:32, 3.72s/it] {'loss': 1.5867, 'grad_norm': 0.000527011871406004, 'learning_rate': 0.2554174994453555, 'epoch': 0.67}
+ 351/520 [22:13<10:27, 3.71s/it] {'loss': 1.4465, 'grad_norm': 0.0004652254505703219, 'learning_rate': 0.2527039454852963, 'epoch': 0.68}
+ 352/520 [22:17<10:21, 3.70s/it] {'loss': 1.6052, 'grad_norm': 0.00046092640095155783, 'learning_rate': 0.2500000000000001, 'epoch': 0.68}
+ 353/520 [22:20<10:19, 3.71s/it] {'loss': 1.7738, 'grad_norm': 0.0004924130994309181, 'learning_rate': 0.24730576804893478, 'epoch': 0.68}
+ 354/520 [22:24<10:12, 3.69s/it] {'loss': 1.89, 'grad_norm': 0.0004932917659764197, 'learning_rate': 0.24462135431415732, 'epoch': 0.68}
+ 355/520 [22:28<10:08, 3.69s/it] {'loss': 1.5509, 'grad_norm': 0.00046979839673828887, 'learning_rate': 0.24194686309624663, 'epoch': 0.68}
+ 356/520 [22:31<10:03, 3.68s/it] {'loss': 1.5573, 'grad_norm': 0.0005338081000243152, 'learning_rate': 0.239282398310251, 'epoch': 0.68}
+ 357/520 [22:35<09:58, 3.67s/it] {'loss': 1.5311, 'grad_norm': 0.00044402347214724213, 'learning_rate': 0.2366280634816496, 'epoch': 0.69}
+ 358/520 [22:39<09:55, 3.67s/it] {'loss': 1.4944, 'grad_norm': 0.0005171452198243078, 'learning_rate': 0.23398396174233177, 'epoch': 0.69}
+ 359/520 [22:42<09:49, 3.66s/it] {'loss': 1.8426, 'grad_norm': 0.0005576739487766781, 'learning_rate': 0.231350195826588, 'epoch': 0.69}
+ 360/520 [22:46<09:46, 3.66s/it] {'loss': 1.8339, 'grad_norm': 0.0005501218742485359, 'learning_rate': 0.22872686806712034, 'epoch': 0.69}
+ 361/520 [22:50<09:41, 3.66s/it] {'loss': 1.8499, 'grad_norm': 0.0005490650421772334, 'learning_rate': 0.2261140803910644, 'epoch': 0.69}
+ 362/520 [22:53<09:36, 3.65s/it] {'loss': 1.5162, 'grad_norm': 0.0005299178609676189, 'learning_rate': 0.22351193431603028, 'epoch': 0.7}
+ 363/520 [22:57<09:32, 3.65s/it] {'loss': 1.6483, 'grad_norm': 0.0005365510644980345, 'learning_rate': 0.2209205309461581, 'epoch': 0.7}
+ 364/520 [23:01<09:29, 3.65s/it] {'loss': 1.8681, 'grad_norm': 0.0005239074838971899, 'learning_rate': 0.21833997096818897, 'epoch': 0.7}
+ 365/520 [23:04<09:25, 3.65s/it] {'loss': 1.6833, 'grad_norm': 0.0005236529789045097, 'learning_rate': 0.2157703546475539, 'epoch': 0.7}
+ 366/520 [23:08<09:22, 3.65s/it] {'loss': 1.6219, 'grad_norm': 0.000449472443684837, 'learning_rate': 0.2132117818244771, 'epoch': 0.7}
+ 367/520 [23:12<09:17, 3.64s/it] {'loss': 1.6264, 'grad_norm': 0.0004944314203703019, 'learning_rate': 0.21066435191009714, 'epoch': 0.71}
+ 368/520 [23:15<09:15, 3.65s/it] {'loss': 1.4524, 'grad_norm': 0.0005263429138737003, 'learning_rate': 0.2081281638826052, 'epoch': 0.71}
+ 369/520 [23:19<09:12, 3.66s/it] {'loss': 1.814, 'grad_norm': 0.0004900112169714482, 'learning_rate': 0.20560331628339767, 'epoch': 0.71}
+ 370/520 [23:23<09:08, 3.66s/it] {'loss': 1.5284, 'grad_norm': 0.00048285696314352434, 'learning_rate': 0.20308990721324927, 'epoch': 0.71}
+ 371/520 [23:26<09:04, 3.65s/it] {'loss': 1.5047, 'grad_norm': 0.0005436323780282241, 'learning_rate': 0.20058803432849986, 'epoch': 0.71}
+ 372/520 [23:30<09:01, 3.66s/it] {'loss': 1.9168, 'grad_norm': 0.00048162517590255024, 'learning_rate': 0.1980977948372612, 'epoch': 0.72}
+ 373/520 [23:34<09:00, 3.68s/it] {'loss': 1.7731, 'grad_norm': 0.0005677723641780619, 'learning_rate': 0.19561928549563967, 'epoch': 0.72}
+ 374/520 [23:37<08:56, 3.67s/it] {'loss': 1.611, 'grad_norm': 0.0005369098198846252, 'learning_rate': 0.19315260260397638, 'epoch': 0.72}
+ 375/520 [23:41<08:53, 3.68s/it] {'loss': 1.5067, 'grad_norm': 0.0005041160665494118, 'learning_rate': 0.1906978420031059, 'epoch': 0.72}
+ 376/520 [23:45<08:50, 3.69s/it] {'loss': 1.6296, 'grad_norm': 0.0005262565621645959, 'learning_rate': 0.18825509907063326, 'epoch': 0.72}
+ 377/520 [23:48<08:46, 3.68s/it] {'loss': 1.5795, 'grad_norm': 0.0005720136897895796, 'learning_rate': 0.18582446871722635, 'epoch': 0.72}
+ 378/520 [23:52<08:42, 3.68s/it] {'loss': 1.6267, 'grad_norm': 0.0004603037799005544, 'learning_rate': 0.18340604538293015, 'epoch': 0.73}
+ 379/520 [23:56<08:38, 3.68s/it] {'loss': 1.6407, 'grad_norm': 0.0004540986038853077, 'learning_rate': 0.18099992303349577, 'epoch': 0.73}
+ 380/520 [23:59<08:35, 3.68s/it] {'loss': 1.9251, 'grad_norm': 0.0005369074816655093, 'learning_rate': 0.17860619515673032, 'epoch': 0.73}
+ 381/520 [24:03<08:29, 3.67s/it] {'loss': 1.6282, 'grad_norm': 0.0005109079606370441, 'learning_rate': 0.17622495475886485, 'epoch': 0.73}
+ 382/520 [24:07<08:28, 3.68s/it] {'loss': 1.8181, 'grad_norm': 0.0005023034866650206, 'learning_rate': 0.17385629436093958, 'epoch': 0.73}
+ 383/520 [24:10<08:23, 3.68s/it] {'loss': 1.4351, 'grad_norm': 0.0005524142847361047, 'learning_rate': 0.17150030599520982, 'epoch': 0.74}
+ 384/520 [24:14<08:22, 3.69s/it] {'loss': 2.1103, 'grad_norm': 0.0006023268897922144, 'learning_rate': 0.16915708120157041, 'epoch': 0.74}
+ 385/520 [24:18<08:16, 3.68s/it] {'loss': 1.5983, 'grad_norm': 0.00047782414094608327, 'learning_rate': 0.16682671102399804, 'epoch': 0.74}
+ 386/520 [24:21<08:11, 3.67s/it] {'loss': 1.5036, 'grad_norm': 0.00043633569300149017, 'learning_rate': 0.16450928600701503, 'epoch': 0.74}
+ 387/520 [24:25<08:08, 3.67s/it] {'loss': 1.9536, 'grad_norm': 0.0005282812461246041, 'learning_rate': 0.16220489619216988, 'epoch': 0.74}
+ 388/520 [24:29<08:04, 3.67s/it] {'loss': 1.4754, 'grad_norm': 0.00045747613348280744, 'learning_rate': 0.1599136311145402, 'epoch': 0.75}
+ 389/520 [24:32<08:00, 3.66s/it] {'loss': 1.5735, 'grad_norm': 0.0005771039667443058, 'learning_rate': 0.15763557979925324, 'epoch': 0.75}
+ 390/520 [24:36<07:55, 3.66s/it] {'loss': 1.5994, 'grad_norm': 0.00047644184745355956, 'learning_rate': 0.1553708307580265, 'epoch': 0.75}
+ 391/520 [24:40<07:54, 3.67s/it] {'loss': 1.7216, 'grad_norm': 0.00046438499146472025, 'learning_rate': 0.15311947198572917, 'epoch': 0.75}
+ 392/520 [24:43<07:51, 3.68s/it] {'loss': 1.5131, 'grad_norm': 0.0005215911780126618, 'learning_rate': 0.15088159095696363, 'epoch': 0.75}
+ 393/520 [24:47<07:48, 3.69s/it] {'loss': 1.6995, 'grad_norm': 0.0005573074149873447, 'learning_rate': 0.14865727462266543, 'epoch': 0.76}
+ 394/520 [24:51<07:44, 3.68s/it] {'loss': 1.5875, 'grad_norm': 0.0004947178497380545, 'learning_rate': 0.14644660940672627, 'epoch': 0.76}
+ 395/520 [24:54<07:39, 3.67s/it] {'loss': 1.5531, 'grad_norm': 0.0005204577374189329, 'learning_rate': 0.14424968120263504, 'epoch': 0.76}
+ 396/520 [24:58<07:33, 3.66s/it] {'loss': 1.6118, 'grad_norm': 0.0005286390516350664, 'learning_rate': 0.14206657537014078, 'epoch': 0.76}
+ 397/520 [25:02<07:30, 3.66s/it] {'loss': 1.6107, 'grad_norm': 0.0005272782302579276, 'learning_rate': 0.1398973767319368, 'epoch': 0.76}
+ 398/520 [25:05<07:25, 3.65s/it] {'loss': 1.5759, 'grad_norm': 0.0005627203977380107, 'learning_rate': 0.13774216957036367, 'epoch': 0.77}
+ 399/520 [25:09<07:23, 3.66s/it] {'loss': 1.754, 'grad_norm': 0.0005469131697353641, 'learning_rate': 0.13560103762413583, 'epoch': 0.77}
+ 400/520 [25:13<07:19, 3.66s/it] {'loss': 1.7923, 'grad_norm': 0.0005987786614199917, 'learning_rate': 0.13347406408508694, 'epoch': 0.77}
+ 401/520 [25:16<07:16, 3.67s/it] {'loss': 1.3714, 'grad_norm': 0.0005533228888617237, 'learning_rate': 0.131361331594938, 'epoch': 0.77}
+ 402/520 [25:20<07:12, 3.67s/it] {'loss': 1.5098, 'grad_norm': 0.0004791419750576812, 'learning_rate': 0.12926292224208663, 'epoch': 0.77}
+ 403/520 [25:24<07:08, 3.66s/it] {'loss': 1.5713, 'grad_norm': 0.0005187899253733914, 'learning_rate': 0.1271789175584172, 'epoch': 0.78}
+ 404/520 [25:27<07:03, 3.65s/it] {'loss': 1.4702, 'grad_norm': 0.0006875901001998534, 'learning_rate': 0.12510939851613284, 'epoch': 0.78}
+ 405/520 [25:31<07:01, 3.66s/it] {'loss': 1.7514, 'grad_norm': 0.0005052736672995885, 'learning_rate': 0.12305444552461009, 'epoch': 0.78}
+ 406/520 [25:35<06:55, 3.65s/it] {'loss': 1.6985, 'grad_norm': 0.0006134117693778445, 'learning_rate': 0.12101413842727343, 'epoch': 0.78}
+ 407/520 [25:38<06:54, 3.67s/it] {'loss': 1.6992, 'grad_norm': 0.00048559722252043566, 'learning_rate': 0.1189885564984946, 'epoch': 0.78}
+ 408/520 [25:42<06:47, 3.64s/it] {'loss': 1.544, 'grad_norm': 0.0005194301659428426, 'learning_rate': 0.11697777844051105, 'epoch': 0.78}
+ 409/520 [25:46<06:45, 3.65s/it] {'loss': 1.7187, 'grad_norm': 0.000541545850228126, 'learning_rate': 0.1149818823803686, 'epoch': 0.79}
+ 410/520 [25:49<06:41, 3.65s/it] {'loss': 1.3791, 'grad_norm': 0.00047765306261175283, 'learning_rate': 0.1130009458668863, 'epoch': 0.79}
+ 411/520 [25:53<06:37, 3.64s/it] {'loss': 1.6717, 'grad_norm': 0.0005206486531906023, 'learning_rate': 0.11103504586764262, 'epoch': 0.79}
+ 412/520 [25:57<06:34, 3.65s/it] {'loss': 1.5895, 'grad_norm': 0.0004879424499381408, 'learning_rate': 0.1090842587659851, 'epoch': 0.79}
+ 413/520 [26:00<06:31, 3.66s/it] {'loss': 1.881, 'grad_norm': 0.0005315022738215822, 'learning_rate': 0.10714866035806325, 'epoch': 0.79}
+ 414/520 [26:04<06:26, 3.65s/it] {'loss': 1.5647, 'grad_norm': 0.0005256919378830179, 'learning_rate': 0.10522832584988234, 'epoch': 0.8}
+ 415/520 [26:08<06:26, 3.68s/it] {'loss': 1.5269, 'grad_norm': 0.00045361541324920584, 'learning_rate': 0.10332332985438247, 'epoch': 0.8}
+ 416/520 [26:11<06:21, 3.67s/it] {'loss': 1.439, 'grad_norm': 0.0005144470185878597, 'learning_rate': 0.10143374638853891, 'epoch': 0.8}
+ 417/520 [26:15<06:18, 3.68s/it] {'loss': 1.6201, 'grad_norm': 0.00046489197132413884, 'learning_rate': 0.09955964887048607, 'epoch': 0.8}
+ 418/520 [26:19<06:14, 3.67s/it] {'loss': 1.6247, 'grad_norm': 0.0005040464690667864, 'learning_rate': 0.09770111011666582, 'epoch': 0.8}
+ 419/520 [26:22<06:10, 3.67s/it] {'loss': 1.6155, 'grad_norm': 0.0005571809108457398, 'learning_rate': 0.09585820233899739, 'epoch': 0.81}
+ 420/520 [26:26<06:06, 3.67s/it] {'loss': 1.4719, 'grad_norm': 0.0005389500413328429, 'learning_rate': 0.09403099714207175, 'epoch': 0.81}
+ 421/520 [26:30<06:04, 3.68s/it] {'loss': 1.3721, 'grad_norm': 0.0005601058408969753, 'learning_rate': 0.09221956552036992, 'epoch': 0.81}
+ 422/520 [26:33<05:58, 3.66s/it] {'loss': 1.5467, 'grad_norm': 0.0005646517039402788, 'learning_rate': 0.09042397785550405, 'epoch': 0.81}
+ 423/520 [26:37<05:56, 3.67s/it] {'loss': 1.5546, 'grad_norm': 0.0005292544842096215, 'learning_rate': 0.08864430391348332, 'epoch': 0.81}
+ 424/520 [26:41<05:52, 3.67s/it] {'loss': 1.9152, 'grad_norm': 0.0005429226287979812, 'learning_rate': 0.08688061284200266, 'epoch': 0.82}
+ 425/520 [26:44<05:49, 3.68s/it] {'loss': 1.5132, 'grad_norm': 0.000472513921696828, 'learning_rate': 0.08513297316775625, 'epoch': 0.82}
+ 426/520 [26:48<05:46, 3.68s/it] {'loss': 1.6424, 'grad_norm': 0.0006880036246083555, 'learning_rate': 0.08340145279377559, 'epoch': 0.82}
+ 427/520 [26:52<05:40, 3.67s/it] {'loss': 1.439, 'grad_norm': 0.0005005963647081787, 'learning_rate': 0.08168611899679012, 'epoch': 0.82}
+ 428/520 [26:55<05:35, 3.65s/it] {'loss': 1.425, 'grad_norm': 0.0006157937249726935, 'learning_rate': 0.0799870384246143, 'epoch': 0.82}
+ 429/520 [26:59<05:33, 3.67s/it] {'loss': 1.5673, 'grad_norm': 0.000586730467015636, 'learning_rate': 0.07830427709355725, 'epoch': 0.82}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ 430/520 [27:03<05:28, 3.65s/it] {'loss': 1.5418, 'grad_norm': 0.0004736232779142686, 'learning_rate': 0.07663790038585794, 'epoch': 0.83}
+ 431/520 [27:06<05:24, 3.65s/it] {'loss': 1.7869, 'grad_norm': 0.0005126784428102366, 'learning_rate': 0.07498797304714544, 'epoch': 0.83}
+ 432/520 [27:10<05:21, 3.65s/it] {'loss': 1.4406, 'grad_norm': 0.0004700412237554559, 'learning_rate': 0.0733545591839222, 'epoch': 0.83}
+ 433/520 [27:14<05:16, 3.64s/it] {'loss': 1.6077, 'grad_norm': 0.0005046478946909804, 'learning_rate': 0.07173772226107433, 'epoch': 0.83}
+ 434/520 [27:17<05:13, 3.65s/it] {'loss': 1.3153, 'grad_norm': 0.0005051730370565024, 'learning_rate': 0.07013752509940485, 'epoch': 0.83}
+ 435/520 [27:21<05:14, 3.70s/it] {'loss': 1.6808, 'grad_norm': 0.0005579820000905483, 'learning_rate': 0.06855402987319348, 'epoch': 0.84}
+ 436/520 [27:25<05:15, 3.75s/it] {'loss': 1.4154, 'grad_norm': 0.0005223612891554726, 'learning_rate': 0.06698729810778065, 'epoch': 0.84}
+ 437/520 [27:29<05:15, 3.81s/it] {'loss': 1.7043, 'grad_norm': 0.0005183745611391045, 'learning_rate': 0.0654373906771768, 'epoch': 0.84}
+ 438/520 [27:33<05:12, 3.81s/it] {'loss': 1.4296, 'grad_norm': 0.0005058529034957521, 'learning_rate': 0.06390436780169734, 'epoch': 0.84}
+ 439/520 [27:37<05:10, 3.83s/it] {'loss': 1.7286, 'grad_norm': 0.00042699731242864847, 'learning_rate': 0.06238828904562316, 'epoch': 0.84}
+ 440/520 [27:40<05:08, 3.85s/it] {'loss': 1.5343, 'grad_norm': 0.00047676807835917757, 'learning_rate': 0.06088921331488567, 'epoch': 0.85}
+ 441/520 [27:44<05:05, 3.86s/it] {'loss': 1.7751, 'grad_norm': 0.0005020106620699045, 'learning_rate': 0.0594071988547788, 'epoch': 0.85}
+ 442/520 [27:48<05:02, 3.88s/it] {'loss': 1.5675, 'grad_norm': 0.0005158795874501971, 'learning_rate': 0.05794230324769517, 'epoch': 0.85}
+ 443/520 [27:52<04:58, 3.87s/it] {'loss': 1.5976, 'grad_norm': 0.0004700425427714976, 'learning_rate': 0.05649458341088914, 'epoch': 0.85}
+ 444/520 [27:56<04:55, 3.89s/it] {'loss': 1.5478, 'grad_norm': 0.00046822675943816586, 'learning_rate': 0.05506409559426573, 'epoch': 0.85}
+ 445/520 [28:00<04:51, 3.89s/it] {'loss': 1.4604, 'grad_norm': 0.000492827867743114, 'learning_rate': 0.05365089537819434, 'epoch': 0.86}
+ 446/520 [28:04<04:47, 3.89s/it] {'loss': 1.8505, 'grad_norm': 0.0005180741583094022, 'learning_rate': 0.052255037671349536, 'epoch': 0.86}
+ 447/520 [28:08<04:45, 3.91s/it] {'loss': 1.581, 'grad_norm': 0.0005070637151141356, 'learning_rate': 0.05087657670857798, 'epoch': 0.86}
+ 448/520 [28:12<04:41, 3.91s/it] {'loss': 1.532, 'grad_norm': 0.0005448708345076841, 'learning_rate': 0.04951556604879048, 'epoch': 0.86}
+ 449/520 [28:16<04:37, 3.91s/it] {'loss': 1.8356, 'grad_norm': 0.0005735834609163062, 'learning_rate': 0.04817205857288176, 'epoch': 0.86}
+ 450/520 [28:19<04:34, 3.91s/it] {'loss': 1.6285, 'grad_norm': 0.000523586246801161, 'learning_rate': 0.04684610648167503, 'epoch': 0.87}
+ 451/520 [28:23<04:29, 3.91s/it] {'loss': 1.6229, 'grad_norm': 0.000530614357676961, 'learning_rate': 0.04553776129389453, 'epoch': 0.87}
+ 452/520 [28:27<04:25, 3.91s/it] {'loss': 1.8706, 'grad_norm': 0.0004921405464693172, 'learning_rate': 0.04424707384416343, 'epoch': 0.87}
+ 453/520 [28:31<04:21, 3.91s/it] {'loss': 1.8225, 'grad_norm': 0.000498457311145182, 'learning_rate': 0.042974094281028496, 'epoch': 0.87}
+ 454/520 [28:35<04:17, 3.90s/it] {'loss': 1.4717, 'grad_norm': 0.0004808253123365072, 'learning_rate': 0.0417188720650119, 'epoch': 0.87}
+ 455/520 [28:39<04:13, 3.90s/it] {'loss': 1.6432, 'grad_norm': 0.00046143800007941856, 'learning_rate': 0.04048145596668967, 'epoch': 0.88}
+ 456/520 [28:43<04:04, 3.82s/it] {'loss': 1.5405, 'grad_norm': 0.0005826634043402464, 'learning_rate': 0.03926189406479613, 'epoch': 0.88}
+ 457/520 [28:46<03:58, 3.79s/it] {'loss': 1.9715, 'grad_norm': 0.0005336128641867913, 'learning_rate': 0.03806023374435663, 'epoch': 0.88}
+ 458/520 [28:50<03:53, 3.77s/it] {'loss': 1.7585, 'grad_norm': 0.000534364053783333, 'learning_rate': 0.036876521694845676, 'epoch': 0.88}
+ 459/520 [28:54<03:48, 3.75s/it] {'loss': 1.6202, 'grad_norm': 0.00047450916952942497, 'learning_rate': 0.03571080390837322, 'epoch': 0.88}
+ 460/520 [28:57<03:44, 3.73s/it] {'loss': 1.4681, 'grad_norm': 0.0005021793880242261, 'learning_rate': 0.03456312567789793, 'epoch': 0.88}
+ 461/520 [29:01<03:40, 3.73s/it] {'loss': 2.0192, 'grad_norm': 0.0005403058370914876, 'learning_rate': 0.03343353159546675, 'epoch': 0.89}
+ 462/520 [29:05<03:35, 3.71s/it] {'loss': 1.9156, 'grad_norm': 0.000506650584314564, 'learning_rate': 0.032322065550483, 'epoch': 0.89}
+ 463/520 [29:08<03:30, 3.69s/it] {'loss': 1.4421, 'grad_norm': 0.0005285271374800597, 'learning_rate': 0.031228770728000455, 'epoch': 0.89}
+ 464/520 [29:12<03:25, 3.68s/it] {'loss': 1.6427, 'grad_norm': 0.0005392858098928965, 'learning_rate': 0.03015368960704584, 'epoch': 0.89}
+ 465/520 [29:16<03:22, 3.68s/it] {'loss': 1.7554, 'grad_norm': 0.0005582145896661832, 'learning_rate': 0.029096863958968266, 'epoch': 0.89}
+ 466/520 [29:19<03:18, 3.68s/it] {'loss': 1.6159, 'grad_norm': 0.0005179796772760256, 'learning_rate': 0.028058334845816213, 'epoch': 0.9}
+ 467/520 [29:23<03:15, 3.68s/it] {'loss': 1.7643, 'grad_norm': 0.0005115647881215742, 'learning_rate': 0.02703814261874199, 'epoch': 0.9}
+ 468/520 [29:27<03:13, 3.73s/it] {'loss': 1.5996, 'grad_norm': 0.0005761301069074459, 'learning_rate': 0.02603632691643415, 'epoch': 0.9}
+ 469/520 [29:31<03:12, 3.77s/it] {'loss': 1.6556, 'grad_norm': 0.000513957420874148, 'learning_rate': 0.025052926663577002, 'epoch': 0.9}
+ 470/520 [29:35<03:09, 3.80s/it] {'loss': 1.487, 'grad_norm': 0.0004921345020608585, 'learning_rate': 0.02408798006933882, 'epoch': 0.9}
+ 471/520 [29:39<03:06, 3.82s/it] {'loss': 1.563, 'grad_norm': 0.0005741318686236646, 'learning_rate': 0.02314152462588659, 'epoch': 0.91}
+ 472/520 [29:42<03:03, 3.83s/it] {'loss': 1.5035, 'grad_norm': 0.0004908471341049239, 'learning_rate': 0.022213597106929606, 'epoch': 0.91}
+ 473/520 [29:46<03:00, 3.84s/it] {'loss': 1.5611, 'grad_norm': 0.0005166622026667884, 'learning_rate': 0.021304233566290964, 'epoch': 0.91}
+ 474/520 [29:50<02:57, 3.87s/it] {'loss': 1.807, 'grad_norm': 0.0005307213790795485, 'learning_rate': 0.020413469336506118, 'epoch': 0.91}
+ 475/520 [29:54<02:53, 3.86s/it] {'loss': 1.6697, 'grad_norm': 0.0005135096896608815, 'learning_rate': 0.019541339027450255, 'epoch': 0.91}
+ 476/520 [29:58<02:50, 3.87s/it] {'loss': 1.5301, 'grad_norm': 0.000535700904280509, 'learning_rate': 0.018687876524993985, 'epoch': 0.92}
+ 477/520 [30:02<02:45, 3.86s/it] {'loss': 1.5138, 'grad_norm': 0.0005178901245178233, 'learning_rate': 0.01785311498968617, 'epoch': 0.92}
+ 478/520 [30:06<02:41, 3.86s/it] {'loss': 1.4685, 'grad_norm': 0.0004590931284442672, 'learning_rate': 0.0170370868554659, 'epoch': 0.92}
+ 479/520 [30:10<02:38, 3.87s/it] {'loss': 1.8133, 'grad_norm': 0.0005926617257453544, 'learning_rate':
0.016239823828401945, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:10<02:38, 3.87s/it] 92%|█████████▏| 480/520 [30:13<02:34, 3.87s/it] {'loss': 1.7939, 'grad_norm': 0.00047698583213484863, 'learning_rate': 0.015461356885461075, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:13<02:34, 3.87s/it] 92%|█████████▎| 481/520 [30:17<02:31, 3.88s/it] {'loss': 1.8223, 'grad_norm': 0.0005097599227577087, 'learning_rate': 0.014701716273304521, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:17<02:31, 3.88s/it] 93%|█████████▎| 482/520 [30:21<02:27, 3.87s/it] {'loss': 1.8526, 'grad_norm': 0.000544569836483178, 'learning_rate': 0.01396093150711275, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:21<02:27, 3.87s/it] 93%|█████████▎| 483/520 [30:25<02:23, 3.87s/it] {'loss': 1.595, 'grad_norm': 0.0005269552784249096, 'learning_rate': 0.013239031369438325, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:25<02:23, 3.87s/it] 93%|█████████▎| 484/520 [30:29<02:19, 3.87s/it] {'loss': 1.589, 'grad_norm': 0.0005283991456294674, 'learning_rate': 0.01253604390908819, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:29<02:19, 3.87s/it] 93%|█████████▎| 485/520 [30:33<02:15, 3.86s/it] {'loss': 1.5197, 'grad_norm': 0.0004910535913690633, 'learning_rate': 0.011851996440033319, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:33<02:15, 3.86s/it] 93%|█████████▎| 486/520 [30:37<02:11, 3.86s/it] {'loss': 1.647, 'grad_norm': 0.0004817187735884496, 'learning_rate': 0.01118691554034773, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:37<02:11, 3.86s/it] 94%|█████████▎| 487/520 [30:40<02:06, 3.84s/it] {'loss': 1.4801, 'grad_norm': 0.0005922031433116205, 'learning_rate': 0.010540827051175816, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:40<02:06, 3.84s/it] 94%|█████████▍| 488/520 [30:44<02:00, 3.78s/it] {'loss': 1.4311, 'grad_norm': 0.000531540632173336, 'learning_rate': 0.009913756075728086, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:44<02:00, 3.78s/it] 94%|█████████▍| 489/520 [30:48<01:56, 3.75s/it] {'loss': 1.7795, 'grad_norm': 0.0004763779885287472, 'learning_rate': 0.009305726978306172, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:48<01:56, 3.75s/it] 94%|█████████▍| 490/520 [30:51<01:52, 3.74s/it] {'loss': 1.5758, 'grad_norm': 0.0004986985799581317, 'learning_rate': 0.008716763383355863, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:51<01:52, 3.74s/it] 94%|█████████▍| 491/520 [30:55<01:47, 3.72s/it] {'loss': 1.51, 'grad_norm': 0.0005095998185111789, 'learning_rate': 0.008146888174549338, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:55<01:47, 3.72s/it] 95%|█████████▍| 492/520 [30:59<01:43, 3.71s/it] {'loss': 1.687, 'grad_norm': 0.0005538712583304222, 'learning_rate': 0.00759612349389599, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:59<01:43, 3.71s/it] 95%|█████████▍| 493/520 [31:02<01:39, 3.69s/it] {'loss': 1.9152, 'grad_norm': 0.0005537578960455804, 'learning_rate': 0.007064490740882057, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:02<01:39, 3.69s/it] 95%|█████████▌| 494/520 [31:06<01:36, 3.70s/it] {'loss': 1.5766, 'grad_norm': 0.0005143257811048844, 'learning_rate': 0.006552010571639455, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:06<01:36, 3.70s/it] 95%|█████████▌| 495/520 [31:10<01:32, 3.69s/it] {'loss': 1.5346, 'grad_norm': 0.0005114978280494216, 'learning_rate': 0.006058702898142643, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:10<01:32, 3.69s/it] 95%|█████████▌| 496/520 [31:13<01:28, 3.68s/it] {'loss': 1.4738, 'grad_norm': 0.0005549365039491534, 'learning_rate': 0.0055845868874357385, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:13<01:28, 
3.68s/it] 96%|█████████▌| 497/520 [31:17<01:24, 3.68s/it] {'loss': 1.7451, 'grad_norm': 0.0005195513270408526, 'learning_rate': 0.005129680960887006, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:17<01:24, 3.68s/it] 96%|█████████▌| 498/520 [31:21<01:21, 3.68s/it] {'loss': 1.5351, 'grad_norm': 0.0005070877771176041, 'learning_rate': 0.004694002793473595, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:21<01:21, 3.68s/it] 96%|█████████▌| 499/520 [31:25<01:17, 3.68s/it] {'loss': 1.8926, 'grad_norm': 0.0005095418713663429, 'learning_rate': 0.004277569313094809, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:25<01:17, 3.68s/it] 96%|█████████▌| 500/520 [31:28<01:13, 3.68s/it] {'loss': 1.6969, 'grad_norm': 0.0005613126462353866, 'learning_rate': 0.0038803966999139683, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:28<01:13, 3.68s/it] 96%|█████████▋| 501/520 [31:32<01:09, 3.68s/it] {'loss': 1.7916, 'grad_norm': 0.0006207478239913811, 'learning_rate': 0.0035025003857301895, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:32<01:09, 3.68s/it] 97%|█████████▋| 502/520 [31:36<01:06, 3.69s/it] {'loss': 1.5757, 'grad_norm': 0.0005011711654948547, 'learning_rate': 0.003143895053378698, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:36<01:06, 3.69s/it] 97%|█████████▋| 503/520 [31:39<01:03, 3.72s/it] {'loss': 1.7727, 'grad_norm': 0.0005413148281668079, 'learning_rate': 0.002804594636160118, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:39<01:03, 3.72s/it] 97%|█████████▋| 504/520 [31:43<00:59, 3.74s/it] {'loss': 1.5853, 'grad_norm': 0.000549353314523457, 'learning_rate': 0.002484612317299295, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:43<00:59, 3.74s/it] 97%|█████████▋| 505/520 [31:47<00:56, 3.76s/it] {'loss': 1.6397, 'grad_norm': 0.0005157629057187716, 'learning_rate': 0.0021839605294330933, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:47<00:56, 3.76s/it] 97%|█████████▋| 506/520 [31:51<00:52, 3.77s/it] {'loss': 1.5195, 'grad_norm': 0.0005112941162789675, 'learning_rate': 0.0019026509541272274, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:51<00:52, 3.77s/it] 98%|█████████▊| 507/520 [31:55<00:48, 3.77s/it] {'loss': 1.95, 'grad_norm': 0.0004728070076419747, 'learning_rate': 0.0016406945214224589, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:55<00:48, 3.77s/it] 98%|█████████▊| 508/520 [31:58<00:45, 3.77s/it] {'loss': 1.6647, 'grad_norm': 0.0005698286523194864, 'learning_rate': 0.0013981014094099353, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:58<00:45, 3.77s/it] 98%|█████████▊| 509/520 [32:02<00:41, 3.79s/it] {'loss': 1.6094, 'grad_norm': 0.0004971651192159395, 'learning_rate': 0.0011748810438355628, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:02<00:41, 3.79s/it] 98%|█████████▊| 510/520 [32:06<00:38, 3.82s/it] {'loss': 1.5651, 'grad_norm': 0.0005994533091707735, 'learning_rate': 0.0009710420977340761, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:06<00:38, 3.82s/it] 98%|█████████▊| 511/520 [32:10<00:34, 3.81s/it] {'loss': 1.5227, 'grad_norm': 0.0004950473483657727, 'learning_rate': 0.0007865924910916977, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:10<00:34, 3.81s/it] 98%|█████████▊| 512/520 [32:14<00:30, 3.80s/it] {'loss': 1.4032, 'grad_norm': 0.0005266042164473946, 'learning_rate': 0.0006215393905388278, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:14<00:30, 3.80s/it] 99%|█████████▊| 513/520 [32:17<00:26, 3.81s/it] {'loss': 1.635, 'grad_norm': 0.0005822247718426792, 'learning_rate': 0.0004758892090711009, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:17<00:26, 3.81s/it] 99%|█████████▉| 514/520 [32:21<00:22, 3.77s/it] 
{'loss': 1.6049, 'grad_norm': 0.00044465992845484927, 'learning_rate': 0.00034964760580069587, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:21<00:22, 3.77s/it] 99%|█████████▉| 515/520 [32:25<00:18, 3.74s/it] {'loss': 1.7004, 'grad_norm': 0.0006856574880556891, 'learning_rate': 0.00024281948573617873, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:25<00:18, 3.74s/it] 99%|█████████▉| 516/520 [32:28<00:14, 3.72s/it] {'loss': 1.5424, 'grad_norm': 0.0004914376276757997, 'learning_rate': 0.00015540899959187726, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:28<00:14, 3.72s/it] 99%|█████████▉| 517/520 [32:32<00:11, 3.69s/it] {'loss': 1.8401, 'grad_norm': 0.000508355086470322, 'learning_rate': 8.741954362678772e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:32<00:11, 3.69s/it] 100%|█████████▉| 518/520 [32:36<00:07, 3.66s/it] {'loss': 1.592, 'grad_norm': 0.0005624150348570744, 'learning_rate': 3.885375951256931e-05, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:36<00:07, 3.66s/it] 100%|█████████▉| 519/520 [32:39<00:03, 3.66s/it] {'loss': 1.8095, 'grad_norm': 0.0006146939073267151, 'learning_rate': 9.713534230904042e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:39<00:03, 3.66s/it] 100%|██████████| 520/520 [32:44<00:00, 3.89s/it] {'loss': 1.9167, 'grad_norm': 0.000563716727720147, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:44<00:00, 3.89s/it] {'train_runtime': 1964.2509, 'train_samples_per_second': 33.87, 'train_steps_per_second': 0.265, 'train_loss': 1.8245452147263748, 'epoch': 1.0} + 100%|██████████| 520/520 [32:44<00:00, 3.89s/it] 100%|██████████| 520/520 [32:44<00:00, 3.78s/it] +[2025-10-09 05:13:18,531] [INFO] [launch.py:348:main] Process 788960 exits successfully. +[2025-10-09 05:13:19,532] [INFO] [launch.py:348:main] Process 788963 exits successfully. +[2025-10-09 05:13:19,533] [INFO] [launch.py:348:main] Process 788959 exits successfully. +[2025-10-09 05:13:19,533] [INFO] [launch.py:348:main] Process 788958 exits successfully. +[2025-10-09 05:13:19,533] [INFO] [launch.py:348:main] Process 788957 exits successfully. +[2025-10-09 05:13:19,534] [INFO] [launch.py:348:main] Process 788961 exits successfully. +[2025-10-09 05:13:20,535] [INFO] [launch.py:348:main] Process 788962 exits successfully. +[2025-10-09 05:13:24,540] [INFO] [launch.py:348:main] Process 788956 exits successfully. 
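The 'learning_rate' trajectory logged above is consistent with transformers' cosine schedule with linear warmup at a peak learning rate of 1.0; note that the process-exit timestamps and the completion banner below place these steps in the 2025-10-09 run whose name encodes "text-3.0_0.5_1". The snippet below is a cross-check written for this document, not code from the repository; the constants (peak LR 1.0, warmup_ratio 0.03, 520 optimizer steps) are read off the experiment name and launch flags.

```python
import math

# Reproduce the logged 'learning_rate' values (assumed settings, see above).
PEAK_LR = 1.0
TOTAL_STEPS = 520
WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)  # warmup_ratio 0.03 -> 16 steps

def lr_at(step: int) -> float:
    """Scheduled learning rate for a given optimizer step."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / max(1, WARMUP_STEPS)              # linear warmup
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))   # cosine decay

print(lr_at(519))  # ~9.7135e-06, matching the 519/520 log line above
print(lr_at(520))  # 0.0, matching the final step
```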
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1_connector-3.0_0.5_1_ablation_20251009_043900.log
+Timestamp: 2025-10-09 05:13:27
+=====================================
diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251009_073254.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251009_073254.log
new file mode 100644
index 0000000000000000000000000000000000000000..08d924cfe89a6d7973e04c1e089b420e637c708f
--- /dev/null
+++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251009_073254.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251009_073254.log
+Timestamp: 2025-10-09 07:32:54
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-09 07:32:57,268] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:32:59,928] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-09 07:32:59,929] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1e-1 --weight_decay 0.
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-09 07:33:02,546] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 07:33:03,584] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 07:33:03,584] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 07:33:03,584] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 07:33:03,584] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 07:33:03,584] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 07:33:03,584] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 07:33:03,584] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 07:33:03,587] [INFO] [launch.py:253:main] process 887202 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', 
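The tail of the launch command carries the masking hyperparameters (--mask_type_* soft, --init_mean_* 3.0, --temperature_* 0.5). The actual masking module lives in tinyllava/train and is not reproduced in this log; the sketch below only illustrates the usual construction such flags suggest (learnable logits initialized at init_mean, squashed through a temperature-scaled sigmoid, and multiplied onto activations). The class name SoftMask and its wiring are assumptions for illustration, not the repository's API.

```python
import torch
import torch.nn as nn

class SoftMask(nn.Module):
    """Illustrative soft, temperature-controlled mask (not the repo's code)."""

    def __init__(self, num_features: int, init_mean: float = 3.0,
                 temperature: float = 0.5):
        super().__init__()
        # init_mean = 3.0 gives sigmoid(3.0 / 0.5) ~ 0.998, so the mask
        # starts nearly fully "on" and training can prune it downward.
        self.logits = nn.Parameter(torch.full((num_features,), init_mean))
        self.temperature = temperature

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # A lower temperature sharpens the sigmoid toward a hard 0/1 gate.
        gate = torch.sigmoid(self.logits / self.temperature)
        return x * gate

mask = SoftMask(num_features=896)   # 896 = LLM hidden size in the config below
h = torch.randn(2, 16, 896)
print(mask(h).shape)                # torch.Size([2, 16, 896])
```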
'--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 07:33:03,589] [INFO] [launch.py:253:main] process 887203 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', 
'--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 07:33:03,591] [INFO] [launch.py:253:main] process 887204 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 07:33:03,593] [INFO] [launch.py:253:main] process 887205 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', 
'--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 07:33:03,595] [INFO] [launch.py:253:main] process 887206 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', 
'--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 07:33:03,597] [INFO] [launch.py:253:main] process 887207 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', 
'--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 07:33:03,599] [INFO] [launch.py:253:main] process 887208 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 07:33:03,601] [INFO] [launch.py:253:main] process 887209 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', 
'--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-09 07:33:10,245] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:33:10,533] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:33:10,549] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:33:10,569] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:33:10,579] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:33:10,579] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:33:10,586] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:33:10,618] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 07:33:10,660] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 07:33:10,947] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 07:33:10,954] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 07:33:10,955] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-09 07:33:10,969] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 07:33:10,981] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 07:33:10,982] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 07:33:10,983] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 07:33:11,017] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+TinyLlavaConfig {
+ "backward_type_connector": "normal",
+ "cache_dir": null,
+ "connector_type": "mlp2x_gelu",
+ "hidden_size": 896,
+ "ignore_index": -100,
+ "image_aspect_ratio": "square",
+ "image_token_index": -200,
+ "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "mask_model": [
+ "llm",
+ "connector"
+ ],
+ "mask_type_connector": "soft",
+ "model_type": "tinyllava",
+ "num_queries": 128,
+ "num_resampler_layers": 3,
+ "pad_token": null,
+ "resampler_hidden_size": 768,
+ "sparsity_connector": null,
+ "subnet_type_connector": "global",
+ "temperature_connector": 0.5,
+ "text_config": {
+ "_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "backward_type": "normal",
+ "bos_token_id": 151643,
+ "eos_token_id": 151643,
+ "hidden_size": 896,
+ "intermediate_size": 4864,
+ "mask_type": "soft",
+ "masked_layers": "all",
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "rope_theta": 1000000.0,
+ "sliding_window": 32768,
+ "subnet_mode": "both",
+ "subnet_type": "None",
+ "temperature_attn": 0.5,
+ "temperature_mlp": 0.5,
+ "tie_word_embeddings": true,
+ "torch_dtype": "bfloat16",
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ },
+ "threshold_connector": null,
+ "tokenizer_model_max_length": 2048,
+ "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+ "tokenizer_padding_side": "right",
+ "tokenizer_use_fast": false,
+ "transformers_version": "4.40.1",
+ "tune_type_connector": "frozen",
+ "tune_type_llm": "frozen",
+ "tune_type_vision_tower": "frozen",
+ "tune_vision_tower_from_layer": -1,
+ "use_cache": false,
+ "vision_config": {
+ "hidden_act": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "image_size": 384,
+ "intermediate_size": 4304,
+ "layer_norm_eps": 1e-06,
+ "model_name_or_path": "google/siglip-so400m-patch14-384",
+ "model_name_or_path2": "",
+ "model_type": "siglip_vision_model",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 27,
+ "patch_size": 14
+ },
+ "vision_feature_layer": -2,
+ "vision_feature_select_strategy": "patch",
+ "vision_hidden_size": 1152,
+ "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+ "vision_model_name_or_path2": "",
+ "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly.
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:887202:887202 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:887202:887202 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:887202:887202 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:887202:887202 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:887202:887202 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:887202:887202 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:887205:887205 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:887205:887205 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:887205:887205 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:887205:887205 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:887205:887205 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:887205:887205 [3] NCCL INFO NET/Plugin: Using internal network plugin. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. 
It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO Using network Socket +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:887209:887209 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:887209:887209 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:887209:887209 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:887209:887209 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:887209:887209 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:887209:887209 [7] NCCL INFO NET/Plugin: Using internal network plugin. 
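[Editor's note] The config dump above pins down the connector: `connector_type: mlp2x_gelu` bridges the SigLIP width (`vision_hidden_size: 1152`) to the Qwen2.5-0.5B width (`hidden_size: 896`), and the connector carries a `soft` mask of `global` type at `temperature_connector: 0.5`. Below is a minimal sketch of what those fields imply, not the TinyLLaVA-Factory source: it assumes the soft mask is a learnable logit squashed by a temperature-scaled sigmoid, and the class name and init-to-ones choice are hypothetical.

```python
import torch
import torch.nn as nn

class MaskedMlp2xGeluConnector(nn.Module):  # hypothetical name, not from the repo
    """2-layer GELU MLP (the usual LLaVA 'mlp2x_gelu') with a global soft mask."""

    def __init__(self, vision_hidden: int = 1152, text_hidden: int = 896,
                 temperature: float = 0.5):
        super().__init__()
        self.proj = nn.Sequential(
            nn.Linear(vision_hidden, text_hidden),
            nn.GELU(),
            nn.Linear(text_hidden, text_hidden),
        )
        self.temperature = temperature
        # One learnable logit per output feature; initializing at 1.0 is an assumption.
        self.mask_logit = nn.Parameter(torch.ones(text_hidden))

    def forward(self, vision_feats: torch.Tensor) -> torch.Tensor:
        # Soft mask in (0, 1); a lower temperature pushes it toward a hard 0/1 gate.
        mask = torch.sigmoid(self.mask_logit / self.temperature)
        return self.proj(vision_feats) * mask
```

For siglip-so400m-patch14-384 (`image_size: 384`, `patch_size: 14`) each image yields 729 patch tokens of width 1152, so the connector maps a (batch, 729, 1152) tensor to (batch, 729, 896) before the visual tokens are spliced into the LLM sequence at `image_token_index: -200`.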
[Roughly 500 near-identical NCCL INFO lines elided for readability: the remaining ranks (PIDs 887203, 887204, 887206, 887207, 887208 and the per-rank network threads) bootstrap the same way over eth0 sockets (NET/IB : No device found, internal net plugin), interleaved with the same per-rank "Special tokens", TypedStorage-deprecation, and Flash Attention 2.0 warnings; ncclCommInitRank then initializes the 8-rank single-node communicator (nNodes 1, localRanks 8, MNNVL 0), pins CPU affinity per GPU, reports NVLS multicast as unavailable on every device, builds 24 ring channels over ranks 0-7 plus chain trees with P2P chunksize 524288, and connects each channel peer-to-peer via P2P/CUMEM/read through "Connected all rings" and the tree hookups that continue below.]
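[Editor's note] `NCCL_SOCKET_IFNAME=eth` is explicitly logged above, and the presence of INFO-level lines suggests `NCCL_DEBUG=INFO` was set in the environment. A minimal standalone sketch (not part of this repo; the file name `check_nccl.py` is hypothetical) that would trigger the same communicator setup on one 8-GPU node:

```python
# check_nccl.py -- minimal repro of the NCCL init logged above.
import os
import torch
import torch.distributed as dist

def main() -> None:
    # torchrun (or the deepspeed launcher) provides RANK/LOCAL_RANK/WORLD_SIZE
    # and MASTER_ADDR/MASTER_PORT in the environment.
    dist.init_process_group(backend="nccl")
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    # A single all-reduce forces NCCL to build the ring/tree channels and the
    # per-channel P2P/CUMEM connections that fill this log.
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)
    print(f"rank {dist.get_rank()}/{dist.get_world_size()}: sum = {x.item()}")
    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```

Run with, e.g., `NCCL_SOCKET_IFNAME=eth NCCL_DEBUG=INFO torchrun --nproc_per_node=8 check_nccl.py`; every rank should print 8.0, and the same "Using network Socket" path should be taken since no InfiniBand device is found (`NET/IB : No device found.`).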
+ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:887203:888792 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:887203:888792 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:887203:888792 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:887204:888810 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:887204:888810 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:887204:888810 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:887206:888791 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:887206:888791 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:887206:888791 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:887207:888790 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:887207:888790 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:887207:888790 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:887209:888788 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:887209:888788 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:887209:888788 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 
p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:887209:888788 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:887207:888790 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:887209:888788 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:887207:888790 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:887209:888788 [7] NCCL INFO ncclCommInitRank comm 0x560836438a80 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xe033832f63381e63 - Init COMPLETE +ywang29-vrdb-test1-worker-0:887207:888790 [5] NCCL INFO ncclCommInitRank comm 0x55a6ea377750 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xe033832f63381e63 - Init COMPLETE +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:887203:888792 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:887203:888792 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:887205:888781 [3] NCCL INFO ncclCommInitRank comm 0x557cf573a7e0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xe033832f63381e63 - Init COMPLETE +ywang29-vrdb-test1-worker-0:887203:888792 [1] NCCL INFO ncclCommInitRank comm 0x55fe55afbae0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xe033832f63381e63 - Init COMPLETE +ywang29-vrdb-test1-worker-0:887206:888791 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:887206:888791 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:887206:888791 [4] NCCL INFO ncclCommInitRank comm 0x559e282f9d60 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xe033832f63381e63 - Init COMPLETE +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:887208:888789 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
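The channel-setup flood above is NCCL's INFO-level debug output. If a quieter log is wanted on a rerun, NCCL's standard environment variables can be set before the distributed backend initializes; a minimal Python sketch (NCCL_DEBUG and NCCL_DEBUG_SUBSYS are documented NCCL variables; setting them from the training script before communicator creation is the only assumption here):

import os

# Must be set before torch.distributed / DeepSpeed creates the NCCL
# communicator, so every rank inherits the values.
os.environ["NCCL_DEBUG"] = "WARN"         # surface only warnings and errors
os.environ["NCCL_DEBUG_SUBSYS"] = "INIT"  # if INFO is kept, limit it to init messages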
+ywang29-vrdb-test1-worker-0:887202:888777 [0] NCCL INFO ncclCommInitRank comm 0x55d4cb670bd0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xe033832f63381e63 - Init COMPLETE
+[... matching "ncclCommInitRank ... Init COMPLETE" lines for ranks 1-7 truncated ...]
+[2025-10-09 07:33:56,909] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.{0..23}.mlp.{down,gate,up}_proj.scores', 'model.layers.{0..23}.self_attn.{k,o,q,v}_proj.scores'] (full list of 168 tensors condensed here)
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
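The `*.scores` tensors flagged above are extra learnable parameters attached to every attention and MLP projection of the language model, so they cannot come from the pretrained checkpoint and are freshly initialized at load time. A minimal sketch of the idea, assuming a soft sigmoid mask over the weights; the class name, default temperature, and wiring are illustrative, not the repository's actual implementation:

import torch
import torch.nn as nn
import torch.nn.functional as F

class MaskedLinear(nn.Linear):
    # Hypothetical: a linear projection carrying a learnable `scores`
    # tensor, one score per weight element. Because the pretrained
    # checkpoint has no `scores` entry, loading it triggers the
    # "newly initialized" warning seen above.
    def __init__(self, in_features, out_features, temperature=1.0, **kwargs):
        super().__init__(in_features, out_features, **kwargs)
        self.scores = nn.Parameter(torch.ones_like(self.weight))
        self.temperature = temperature

    def forward(self, x):
        # Soft mask in (0, 1); a lower temperature pushes it toward 0/1.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)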
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 
'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 
'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 
'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.laSome weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 
'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 
'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 
'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 
'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 
'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 
'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-09 07:38:45,760] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
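[Editor's note] The `*.scores` tensors flagged as "newly initialized" above are expected rather than a loading error: the masktune recipe attaches a learnable score tensor to every masked linear layer (the `SupermaskLinearSparsity_SoftForward_Normal` modules in the architecture dump that follows), and the pretrained checkpoint contains no such tensors, so transformers warns about them. The sketch below is a hypothetical reconstruction of such a layer, not the project's actual code; the class name and implementation details are assumptions, while the temperature 0.3 comes from the launch command and the score initialization near 3.0 mirrors the "Pre-training init ... Mean=3.000000" lines later in this log.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Hypothetical soft-supermask linear layer (illustrative sketch).

    The pretrained weight stays frozen; only the score tensor is trained.
    The forward pass scales the weight by sigmoid(scores / temperature),
    a differentiable ("soft forward") mask over individual weights.
    """

    def __init__(self, in_features, out_features, bias=True, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # New parameter with the same shape as the weight; it is absent from
        # the base checkpoint, which triggers the "newly initialized" warning.
        self.scores = nn.Parameter(torch.full_like(self.weight, 3.0))
        self.weight.requires_grad = False  # mask-only tuning

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```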
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): 
MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-09 07:38:58,924 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-09 07:38:58,928 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters 
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters 
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters 
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:887209:893985 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 
[3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:887209:893985 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887208:893986 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887208:893986 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:887204:893987 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887204:893987 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:887206:893984 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:887207:893988 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 
6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:887206:893984 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:887207:893988 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887205:893989 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887205:893989 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887209:893985 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887204:893987 [2] NCCL INFO Channel 00/0 : 2[2] -> 
3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887206:893984 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:893986 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887207:893988 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887209:893985 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887204:893987 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887206:893984 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:893986 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887207:893988 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887209:893985 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887204:893987 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887206:893984 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:893986 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887207:893988 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887205:893989 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887209:893985 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887204:893987 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887206:893984 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:893986 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887207:893988 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887204:893987 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887205:893989 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887206:893984 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887209:893985 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887208:893986 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887207:893988 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:887205:893989 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via 
+[NCCL channel setup, condensed: on host ywang29-vrdb-test1-worker-0, each rank r in 0..7 connects channels 00/0 through 23/0 forward around the ring to rank (r+1) mod 8, and after "Connected all rings" opens the reverse channels back to rank (r-1) mod 8, all via P2P/CUMEM/read]
+ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[ranks 1-7 log identical "Connected all rings"/"Connected all trees", threadThresholds, and channel summary lines]
+ywang29-vrdb-test1-worker-0:887206:893984 [4] NCCL INFO ncclCommInitRank comm 0x7f0bec06ab00 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x772d9596f2c77b4f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:887208:893986 [6] NCCL INFO ncclCommInitRank comm 0x7f3e4806b070 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x772d9596f2c77b4f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:887209:893985 [7] NCCL INFO ncclCommInitRank comm 0x7fed7c06a9a0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x772d9596f2c77b4f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:887207:893988 [5] NCCL INFO ncclCommInitRank comm 0x7f947006b0b0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x772d9596f2c77b4f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:887205:893989 [3] NCCL INFO ncclCommInitRank comm 0x7fc0b006ae60 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x772d9596f2c77b4f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:887203:893983 [1] NCCL INFO ncclCommInitRank comm 0x7f5a9406a450 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x772d9596f2c77b4f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:887202:893982 [0] NCCL INFO ncclCommInitRank comm 0x7f40e006a8b0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x772d9596f2c77b4f - Init COMPLETE
+ywang29-vrdb-test1-worker-0:887204:893987 [2] NCCL INFO ncclCommInitRank comm 0x7fca0406a310 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x772d9596f2c77b4f - Init COMPLETE
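The channel lines above all share one NCCL pattern, "Channel <ch>/<sub> : <src>[dev] -> <dst>[dev] via <transport>". As a minimal sketch (a hypothetical helper, not part of this training run), the GPU-to-GPU topology can be tallied from such a log with a few lines of Python:

import re
from collections import Counter

# Matches lines like:
#   "... NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read"
EDGE = re.compile(r"Channel \d+/\d+ : (\d+)\[\d+\] -> (\d+)\[\d+\] via (\S+)")

def channel_counts(log_path):
    """Count how many channels NCCL opened between each (src, dst) GPU pair."""
    counts = Counter()
    with open(log_path) as f:
        for line in f:
            m = EDGE.search(line)
            if m:
                counts[(int(m.group(1)), int(m.group(2)), m.group(3))] += 1
    return counts

# For the block above, every neighbouring pair (r, r+1 mod 8), plus the
# reverse direction, should show up with transport "P2P/CUMEM/read".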
+ 0%| | 1/520 [00:12<1:48:40, 12.56s/it] {'loss': 2.0453, 'grad_norm': 0.0048347622265943494, 'learning_rate': 0.00625, 'epoch': 0.0}
+ 0%| | 2/520 [00:16<1:03:55, 7.40s/it] {'loss': 2.0549, 'grad_norm': 0.005249075345832225, 'learning_rate': 0.0125, 'epoch': 0.0}
+ 1%| | 3/520 [00:20<49:20, 5.73s/it] {'loss': 2.1899, 'grad_norm': 0.006007403965055447, 'learning_rate': 0.018750000000000003, 'epoch': 0.01}
+ 1%| | 4/520 [00:23<42:14, 4.91s/it] {'loss': 2.0656, 'grad_norm': 0.004964247083203992, 'learning_rate': 0.025, 'epoch': 0.01}
+ 1%| | 5/520 [00:27<38:12, 4.45s/it] {'loss': 2.2333, 'grad_norm': 0.005481931151391072, 'learning_rate': 0.03125, 'epoch': 0.01}
+ 1%| | 6/520 [00:31<35:48, 4.18s/it] {'loss': 1.6754, 'grad_norm': 0.0028024293896292047, 'learning_rate': 0.037500000000000006, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:34<34:13, 4.00s/it] {'loss': 1.7084, 'grad_norm': 0.0024473661584056604, 'learning_rate': 0.043750000000000004, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:38<34:50, 4.08s/it] {'loss': 1.6025, 'grad_norm': 0.001320607598426331, 'learning_rate': 0.05, 'epoch': 0.02}
+ 2%|▏ | 9/520 [00:43<35:41, 4.19s/it] {'loss': 1.6498, 'grad_norm': 0.000783661038123087, 'learning_rate': 0.05625, 'epoch': 0.02}
+ 2%|▏ | 10/520 [00:47<34:39, 4.08s/it] {'loss': 1.5049, 'grad_norm': 0.0007692698829003232, 'learning_rate': 0.0625, 'epoch': 0.02}
+ 2%|▏ | 11/520 [00:51<34:09, 4.03s/it] {'loss': 1.5197, 'grad_norm': 0.0005693770511048058, 'learning_rate': 0.06875, 'epoch': 0.02}
+ 2%|▏ | 12/520 [00:55<34:28, 4.07s/it] {'loss': 1.3891, 'grad_norm': 0.00042803767927544447, 'learning_rate': 0.07500000000000001, 'epoch': 0.02}
+[2025-10-09 07:40:03,840] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
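The stage3 warning above names the remedy directly. A minimal sketch of what the suggested synchronized flush could look like in a custom DeepSpeed loop (hypothetical: `engine`, `dataloader`, and `flush_every` are stand-ins; this run drives training through the HF Trainer, which owns its own loop):

from deepspeed.accelerator import get_accelerator

def train(engine, dataloader, flush_every=50):
    for step, batch in enumerate(dataloader):
        loss = engine(batch)      # forward through the DeepSpeed engine
        engine.backward(loss)
        engine.step()
        # Flush the CUDA caching allocator at the same step on every rank,
        # per the warning above. Flushing every step would cost throughput,
        # so a modest interval is assumed here.
        if step % flush_every == 0:
            get_accelerator().empty_cache()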
+ 2%|▎ | 13/520 [00:59<35:53, 4.25s/it] {'loss': 1.4851, 'grad_norm': 0.00046419610512611403, 'learning_rate': 0.08125, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:03<34:30, 4.09s/it] {'loss': 1.5107, 'grad_norm': 0.0004052613242924856, 'learning_rate': 0.08750000000000001, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:07<33:17, 3.96s/it] {'loss': 1.4214, 'grad_norm': 0.00035558259448828977, 'learning_rate': 0.09375, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:10<32:23, 3.86s/it] {'loss': 1.3836, 'grad_norm': 0.0003928965017078922, 'learning_rate': 0.1, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:14<31:52, 3.80s/it] {'loss': 1.5214, 'grad_norm': 0.00042730685706965534, 'learning_rate': 0.0999990286465769, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:18<31:19, 3.74s/it] {'loss': 1.3867, 'grad_norm': 0.0005060763304522127, 'learning_rate': 0.09999611462404874, 'epoch': 0.03}
+ 4%|▎ | 19/520 [01:21<31:14, 3.74s/it] {'loss': 1.3713, 'grad_norm': 0.0004207693780708949, 'learning_rate': 0.09999125804563733, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:25<30:54, 3.71s/it] {'loss': 1.3562, 'grad_norm': 0.0005260218345100212, 'learning_rate': 0.09998445910004082, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:29<30:59, 3.73s/it] {'loss': 1.3648, 'grad_norm': 0.0005176266346576527, 'learning_rate': 0.09997571805142638, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:32<30:44, 3.70s/it] {'loss': 1.4863, 'grad_norm': 0.0005276003444915787, 'learning_rate': 0.09996503523941994, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:36<30:29, 3.68s/it] {'loss': 1.4255, 'grad_norm': 0.0005158842038556928, 'learning_rate': 0.0999524110790929, 'epoch': 0.04}
+ 5%|▍ | 24/520 [01:40<30:24, 3.68s/it] {'loss': 1.338, 'grad_norm': 0.0005241181304452819, 'learning_rate': 0.09993784606094612, 'epoch': 0.05}
+ 5%|▍ | 25/520 [01:43<30:14, 3.67s/it] {'loss': 1.4344, 'grad_norm': 0.0006278900917967083, 'learning_rate': 0.09992134075089083, 'epoch': 0.05}
+ 5%|▌ | 26/520 [01:47<30:05, 3.66s/it] {'loss': 1.362, 'grad_norm': 0.0005685090146962575, 'learning_rate': 0.0999028957902266, 'epoch': 0.05}
+ 5%|▌ | 27/520 [01:51<29:54, 3.64s/it] {'loss': 1.2913, 'grad_norm': 0.00055843733210521, 'learning_rate': 0.09988251189561645, 'epoch': 0.05}
+ 5%|▌ | 28/520 [01:54<29:53, 3.65s/it] {'loss': 1.3236, 'grad_norm': 0.0006016943252530421, 'learning_rate': 0.099860189859059, 'epoch': 0.05}
+ 6%|▌ | 29/520 [01:58<30:10, 3.69s/it] {'loss': 1.338, 'grad_norm': 0.0006143439589518735, 'learning_rate': 0.09983593054785776, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:02<30:45, 3.77s/it] {'loss': 1.3947, 'grad_norm': 0.0005540269602711947, 'learning_rate': 0.09980973490458728, 'epoch': 0.06}
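The learning rates logged above are consistent with a linear warmup over the first 16 steps (3% of 520) to a peak of 0.1, followed by cosine decay. A minimal sketch that reproduces them (assuming the usual warmup-plus-cosine shape, with the peak read off step 16):

import math

def lr(step, peak=0.1, warmup=16, total=520):
    # Linear warmup to the peak, then cosine decay toward zero.
    if step < warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

# lr(1)  -> 0.00625 and lr(8) -> 0.05, matching the warmup steps above;
# lr(16) -> 0.1 (end of warmup);
# lr(17) -> ~0.09999902864657 and lr(100) -> ~0.09330127018922,
# matching the values logged at those steps.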
+ 6%|▌ | 31/520 [02:06<30:57, 3.80s/it] {'loss': 1.2972, 'grad_norm': 0.0005368855434648286, 'learning_rate': 0.0997816039470567, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:10<31:09, 3.83s/it] {'loss': 1.2128, 'grad_norm': 0.0006113870460261891, 'learning_rate': 0.09975153876827009, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:14<31:25, 3.87s/it] {'loss': 1.2902, 'grad_norm': 0.0006640227923913462, 'learning_rate': 0.09971954053638399, 'epoch': 0.06}
+ 7%|▋ | 34/520 [02:18<31:25, 3.88s/it] {'loss': 1.2861, 'grad_norm': 0.0007128615291724036, 'learning_rate': 0.09968561049466214, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:22<31:10, 3.86s/it] {'loss': 1.2993, 'grad_norm': 0.0008183844619090912, 'learning_rate': 0.09964974996142698, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:25<30:41, 3.80s/it] {'loss': 1.3892, 'grad_norm': 0.0006744694766830246, 'learning_rate': 0.09961196033000862, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:29<30:34, 3.80s/it] {'loss': 1.3614, 'grad_norm': 0.0006749649375791175, 'learning_rate': 0.09957224306869053, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:33<30:30, 3.80s/it] {'loss': 1.4504, 'grad_norm': 0.0007207299223942139, 'learning_rate': 0.09953059972065265, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:37<30:22, 3.79s/it] {'loss': 1.3249, 'grad_norm': 0.0009020479007900418, 'learning_rate': 0.09948703190391131, 'epoch': 0.07}
+ 8%|▊ | 40/520 [02:40<30:21, 3.79s/it] {'loss': 1.3488, 'grad_norm': 0.0006795406742816625, 'learning_rate': 0.09944154131125643, 'epoch': 0.08}
+ 8%|▊ | 41/520 [02:44<30:17, 3.79s/it] {'loss': 1.3247, 'grad_norm': 0.0007846200220083442, 'learning_rate': 0.09939412971018574, 'epoch': 0.08}
+ 8%|▊ | 42/520 [02:48<30:10, 3.79s/it] {'loss': 1.3187, 'grad_norm': 0.0010423929185088374, 'learning_rate': 0.09934479894283606, 'epoch': 0.08}
+ 8%|▊ | 43/520 [02:52<30:18, 3.81s/it] {'loss': 1.2508, 'grad_norm': 0.00078463983395651, 'learning_rate': 0.0992935509259118, 'epoch': 0.08}
+ 8%|▊ | 44/520 [02:56<30:09, 3.80s/it] {'loss': 1.3437, 'grad_norm': 0.0008956203602493442, 'learning_rate': 0.09924038765061041, 'epoch': 0.08}
+ 9%|▊ | 45/520 [02:59<30:05, 3.80s/it] {'loss': 1.3437, 'grad_norm': 0.0009178761688361453, 'learning_rate': 0.09918531118254507, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:03<29:57, 3.79s/it] {'loss': 1.3892, 'grad_norm': 0.0009177096782314997, 'learning_rate': 0.09912832366166442, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:07<29:31, 3.75s/it] {'loss': 1.3025, 'grad_norm': 0.0009302845998493256, 'learning_rate': 0.09906942730216939, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:10<29:20, 3.73s/it] {'loss': 1.3144, 'grad_norm': 0.001126274469761391, 'learning_rate': 0.09900862439242719, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:14<29:08, 3.71s/it] {'loss': 1.3367, 'grad_norm': 0.0009667872992247774, 'learning_rate': 0.09894591729488243, 'epoch': 0.09}
+ 10%|▉ | 50/520 [03:18<28:53, 3.69s/it] {'loss': 1.3317, 'grad_norm': 0.0009670693982133395, 'learning_rate': 0.09888130844596524, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:21<28:53, 3.70s/it] {'loss': 1.2696, 'grad_norm': 0.0010713303466733535, 'learning_rate': 0.09881480035599667, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:25<28:50, 3.70s/it] {'loss': 1.4011, 'grad_norm': 0.0011001353503679204, 'learning_rate': 0.09874639560909118, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:29<28:49, 3.70s/it] {'loss': 1.3725, 'grad_norm': 0.0010257948865571808, 'learning_rate': 0.09867609686305617, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:33<28:40, 3.69s/it] {'loss': 1.3136, 'grad_norm': 0.0010149446166003867, 'learning_rate': 0.09860390684928873, 'epoch': 0.1}
+ 11%|█ | 55/520 [03:36<28:38, 3.70s/it] {'loss': 1.2601, 'grad_norm': 0.0011014289406276973, 'learning_rate': 0.09852982837266955, 'epoch': 0.11}
+ 11%|█ | 56/520 [03:40<28:38, 3.70s/it] {'loss': 1.3873, 'grad_norm': 0.001033397478450364, 'learning_rate': 0.0984538643114539, 'epoch': 0.11}
+ 11%|█ | 57/520 [03:44<28:26, 3.69s/it] {'loss': 1.2595, 'grad_norm': 0.0012034451090235146, 'learning_rate': 0.09837601761715982, 'epoch': 0.11}
+ 11%|█ | 58/520 [03:47<28:31, 3.71s/it] {'loss': 1.4005, 'grad_norm': 0.0009289132180028716, 'learning_rate': 0.09829629131445342, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [03:51<28:42, 3.74s/it] {'loss': 1.2147, 'grad_norm': 0.0010496582399263416, 'learning_rate': 0.09821468850103139, 'epoch': 0.11}
+ 12%|█▏ | 60/520 [03:55<28:44, 3.75s/it] {'loss': 1.313, 'grad_norm': 0.0010073132793201526, 'learning_rate': 0.09813121234750061, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [03:59<28:34, 3.74s/it] {'loss': 1.2892, 'grad_norm': 0.001095592393277112, 'learning_rate': 0.09804586609725499, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:02<28:24, 3.72s/it] {'loss': 1.298, 'grad_norm': 0.001260565606235231, 'learning_rate': 0.09795865306634939, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:06<28:12, 3.70s/it] {'loss': 1.2967, 'grad_norm': 0.0011318253637986415, 'learning_rate': 0.09786957664337091, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:10<28:23, 3.74s/it] {'loss': 1.3155, 'grad_norm': 0.0011512623380389559, 'learning_rate': 0.09777864028930705, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:14<28:16, 3.73s/it] {'loss': 1.3235, 'grad_norm': 0.0013877981990146875, 'learning_rate': 0.09768584753741134, 'epoch': 0.12}
+ 13%|█▎ | 66/520 [04:17<28:03, 3.71s/it] {'loss': 1.2658, 'grad_norm': 0.0010811382627913461, 'learning_rate': 0.09759120199306613, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:21<27:59, 3.71s/it] {'loss': 1.1872, 'grad_norm': 0.0011667927136238495, 'learning_rate': 0.0974947073336423, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:25<27:50, 3.69s/it] {'loss': 1.2549, 'grad_norm': 0.0011084118749291115, 'learning_rate': 0.0973963673083566, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:28<27:47, 3.70s/it] {'loss': 1.2338, 'grad_norm': 0.0012106627498979236, 'learning_rate': 0.0972961857381258, 'epoch': 0.13}
+ 13%|█▎ | 70/520 [04:32<27:41, 3.69s/it] {'loss': 1.2609, 'grad_norm': 0.0012756036154162628, 'learning_rate': 0.09719416651541839, 'epoch': 0.13}
+ 14%|█▎ | 71/520 [04:36<27:37, 3.69s/it] {'loss': 1.2041, 'grad_norm': 0.0011173410855358967, 'learning_rate': 0.09709031360410318, 'epoch': 0.14}
+ 14%|█▍ | 72/520 [04:39<27:32, 3.69s/it] {'loss': 1.3528, 'grad_norm': 0.0011694989458495672, 'learning_rate': 0.09698463103929543, 'epoch': 0.14}
+ 14%|█▍ | 73/520 [04:43<27:31, 3.69s/it] {'loss': 1.1815, 'grad_norm': 0.00125619694849118, 'learning_rate': 0.09687712292719997, 'epoch': 0.14}
+ 14%|█▍ | 74/520 [04:47<27:22, 3.68s/it] {'loss': 1.2821, 'grad_norm': 0.001308228088027997, 'learning_rate': 0.0967677934449517, 'epoch': 0.14}
+ 14%|█▍ | 75/520 [04:50<27:16, 3.68s/it] {'loss': 1.2066, 'grad_norm': 0.0011109164070926836, 'learning_rate': 0.09665664684045333, 'epoch': 0.14}
+ 15%|█▍ | 76/520 [04:54<27:15, 3.68s/it] {'loss': 1.3256, 'grad_norm': 0.0010105152011200902, 'learning_rate': 0.09654368743221022, 'epoch': 0.15}
+ 15%|█▍ | 77/520 [04:58<27:05, 3.67s/it] {'loss': 1.138, 'grad_norm': 0.001407968010540688, 'learning_rate': 0.09642891960916268, 'epoch': 0.15}
+ 15%|█▌ | 78/520 [05:01<27:12, 3.69s/it] {'loss': 1.2409, 'grad_norm': 0.0012326717358839094, 'learning_rate': 0.09631234783051544, 'epoch': 0.15}
+ 15%|█▌ | 79/520 [05:05<27:08, 3.69s/it] {'loss': 1.2231, 'grad_norm': 0.001180319755793147, 'learning_rate': 0.09619397662556434, 'epoch': 0.15}
+ 15%|█▌ | 80/520 [05:09<27:01, 3.69s/it] {'loss': 1.3062, 'grad_norm': 0.001238143992257528, 'learning_rate': 0.09607381059352038, 'epoch': 0.15}
+ 16%|█▌ | 81/520 [05:13<27:06, 3.70s/it] {'loss': 1.362, 'grad_norm': 0.0015609408667878434, 'learning_rate': 0.09595185440333104, 'epoch': 0.16}
+ 16%|█▌ | 82/520 [05:16<27:17, 3.74s/it] {'loss': 1.2912, 'grad_norm': 0.0012537366361840534, 'learning_rate': 0.09582811279349882, 'epoch': 0.16}
+ 16%|█▌ | 83/520 [05:20<27:27, 3.77s/it] {'loss': 1.3022, 'grad_norm': 0.0013552157689524582, 'learning_rate': 0.09570259057189717, 'epoch': 0.16}
+ 16%|█▌ | 84/520 [05:24<27:27, 3.78s/it] {'loss': 1.3143, 'grad_norm': 0.001351431247731067, 'learning_rate': 0.09557529261558367, 'epoch': 0.16}
+ 16%|█▋ | 85/520 [05:28<27:10, 3.75s/it] {'loss': 1.3532, 'grad_norm': 0.0012719065685720253, 'learning_rate': 0.09544622387061055, 'epoch': 0.16}
+ 17%|█▋ | 86/520 [05:31<27:02, 3.74s/it] {'loss': 1.3429, 'grad_norm': 0.0012620080062841202, 'learning_rate': 0.09531538935183251, 'epoch': 0.17}
+ 17%|█▋ | 87/520 [05:35<26:56, 3.73s/it] {'loss': 1.2588, 'grad_norm': 0.0012396269596353358, 'learning_rate': 0.09518279414271184, 'epoch': 0.17}
+ 17%|█▋ | 88/520 [05:39<27:08, 3.77s/it] {'loss': 1.2007, 'grad_norm': 0.0009789544619873474, 'learning_rate': 0.09504844339512096, 'epoch': 0.17}
+ 17%|█▋ | 89/520 [05:43<27:06, 3.77s/it] {'loss': 1.3019, 'grad_norm': 0.0013418411760750689, 'learning_rate': 0.09491234232914221, 'epoch': 0.17}
+ 17%|█▋ | 90/520 [05:46<26:47, 3.74s/it] {'loss': 1.2356, 'grad_norm': 0.0012531640382589325, 'learning_rate': 0.09477449623286505, 'epoch': 0.17}
+ 18%|█▊ | 91/520 [05:50<26:38, 3.73s/it] {'loss': 1.3029, 'grad_norm': 0.0011870527266465423, 'learning_rate': 0.09463491046218059, 'epoch': 0.17}
+ 18%|█▊ | 92/520 [05:54<26:48, 3.76s/it] {'loss': 1.2445, 'grad_norm': 0.0013284673482529233, 'learning_rate': 0.09449359044057344, 'epoch': 0.18}
+ 18%|█▊ | 93/520 [05:58<26:51, 3.77s/it] {'loss': 1.2543, 'grad_norm': 0.0013419339544954436, 'learning_rate': 0.09435054165891109, 'epoch': 0.18}
+ 18%|█▊ | 94/520 [06:02<26:49, 3.78s/it] {'loss': 1.3337, 'grad_norm': 0.0012820478890686736, 'learning_rate': 0.09420576967523049, 'epoch': 0.18}
+ 18%|█▊ | 95/520 [06:05<26:37, 3.76s/it] {'loss': 1.235, 'grad_norm': 0.0014689961102055187, 'learning_rate': 0.09405928011452212, 'epoch': 0.18}
+ 18%|█▊ | 96/520 [06:09<26:34, 3.76s/it] {'loss': 1.2586, 'grad_norm': 0.0011583243169151767, 'learning_rate': 0.09391107866851144, 'epoch': 0.18}
+ 19%|█▊ | 97/520 [06:13<26:23, 3.74s/it] {'loss': 1.2311, 'grad_norm': 0.0014775494431583838, 'learning_rate': 0.09376117109543769, 'epoch': 0.19}
+ 19%|█▉ | 98/520 [06:16<26:08, 3.72s/it] {'loss': 1.2326, 'grad_norm': 0.0010958328151545967, 'learning_rate': 0.09360956321983027, 'epoch': 0.19}
+ 19%|█▉ | 99/520 [06:20<25:59, 3.70s/it] {'loss': 1.2255, 'grad_norm': 0.0013345565451790846, 'learning_rate': 0.09345626093228232, 'epoch': 0.19}
+ 19%|█▉ | 100/520 [06:24<25:54, 3.70s/it] {'loss': 1.211, 'grad_norm': 0.0011808486011541826, 'learning_rate': 0.09330127018922195, 'epoch': 0.19}
+ 19%|█▉ | 101/520 [06:27<25:45, 3.69s/it] {'loss': 1.2509, 'grad_norm': 0.0013331870288650729, 'learning_rate': 0.09314459701268066, 'epoch': 0.19}
+ 20%|█▉ | 102/520 [06:31<25:48, 3.70s/it] {'loss': 1.246, 'grad_norm': 0.0013482574231790069, 'learning_rate': 0.09298624749005951, 'epoch': 0.2}
+ 20%|█▉ | 103/520 [06:35<25:41, 3.70s/it] {'loss': 1.1892, 'grad_norm': 0.001187255521120653, 'learning_rate': 0.09282622777389259, 'epoch': 0.2}
+ 20%|██ | 104/520 [06:38<25:32, 3.68s/it] {'loss': 1.2575, 'grad_norm': 0.0013337683353476005, 'learning_rate': 0.09266454408160779, 'epoch': 0.2}
+ 20%|██ | 105/520 [06:42<25:31, 3.69s/it] {'loss': 1.2486, 'grad_norm': 0.0012432358223182195, 'learning_rate': 0.09250120269528546, 'epoch': 0.2}
+ 20%|██ | 106/520 [06:46<25:44, 3.73s/it] {'loss': 1.2344, 'grad_norm': 0.0011398432782634433, 'learning_rate': 0.09233620996141421, 'epoch': 0.2}
+ 21%|██ | 107/520 [06:50<25:50, 3.75s/it] {'loss': 1.2093, 'grad_norm': 0.0012251078991376864, 'learning_rate': 0.09216957229064429, 'epoch': 0.21}
+ 21%|██ | 108/520 [06:54<25:52, 3.77s/it] {'loss': 1.205, 'grad_norm': 0.0013032344495302892, 'learning_rate': 0.09200129615753859, 'epoch': 0.21}
+ 21%|██ | 109/520 [06:57<25:34, 3.73s/it] {'loss': 1.1841, 'grad_norm': 0.0011193463692520172, 'learning_rate': 0.09183138810032099, 'epoch': 0.21}
+ 21%|██ | 110/520 [07:01<25:25, 3.72s/it] {'loss': 1.3779, 'grad_norm': 0.0013309315992923705, 'learning_rate': 0.09165985472062245, 'epoch': 0.21}
+ 21%|██▏ | 111/520 [07:05<25:16, 3.71s/it] {'loss': 1.3722, 'grad_norm': 0.0013809893930157243, 'learning_rate': 0.09148670268322438, 'epoch': 0.21}
+ 22%|██▏ | 112/520 [07:08<25:08, 3.70s/it] {'loss': 1.2684, 'grad_norm': 0.0012751458315227852, 'learning_rate': 0.09131193871579975, 'epoch': 0.22}
+ 22%|██▏ | 113/520 [07:12<25:06, 3.70s/it] {'loss': 1.1629, 'grad_norm': 0.0012137911760780973, 'learning_rate': 0.09113556960865167, 'epoch': 0.22}
+ 22%|██▏ | 114/520 [07:16<24:58, 3.69s/it] {'loss': 1.2583, 'grad_norm': 0.0012617508539901068, 'learning_rate': 0.0909576022144496, 'epoch': 0.22}
+ 22%|██▏ | 115/520 [07:19<24:58, 3.70s/it] {'loss': 1.3436, 'grad_norm': 0.001287737184828616, 'learning_rate': 0.09077804344796302, 'epoch': 0.22}
+ 22%|██▏ | 116/520 [07:23<25:09, 3.74s/it] {'loss': 1.3601, 'grad_norm': 0.0012288961374715469, 'learning_rate': 0.09059690028579284, 'epoch': 0.22}
+ 22%|██▎ | 117/520 [07:27<25:14, 3.76s/it] {'loss': 1.3194, 'grad_norm': 0.0013175587317526694, 'learning_rate': 0.09041417976610028, 'epoch': 0.23}
+ 23%|██▎ | 118/520 [07:31<25:15, 3.77s/it] {'loss': 1.2565, 'grad_norm': 0.001237071832037953, 'learning_rate': 0.09022988898833342, 'epoch': 0.23}
+ 23%|██▎ | 119/520 [07:35<25:18, 3.79s/it] {'loss': 1.2078, 'grad_norm': 0.0013163757245730433, 'learning_rate': 0.0900440351129514, 'epoch': 0.23}
+ 23%|██▎ | 120/520 [07:38<25:13, 3.78s/it] {'loss': 1.2167, 'grad_norm': 0.0014260755472991096, 'learning_rate': 0.08985662536114614, 'epoch': 0.23}
+ 23%|██▎ | 121/520 [07:42<25:14, 3.80s/it] {'loss': 1.2627, 'grad_norm': 0.0013282636715006132, 'learning_rate': 0.08966766701456176, 'epoch': 0.23}
+ 23%|██▎ | 122/520 [07:46<25:09, 3.79s/it] {'loss': 1.1837, 'grad_norm': 0.001219418311296233, 'learning_rate': 0.08947716741501177, 'epoch': 0.23}
+ 24%|██▎ | 123/520 [07:50<25:15, 3.82s/it] {'loss': 1.2726, 'grad_norm': 0.001260437811111303, 'learning_rate': 0.08928513396419369, 'epoch': 0.24}
+ 24%|██▍ | 124/520 [07:54<25:10, 3.81s/it] {'loss': 1.2304, 'grad_norm': 0.0013892453093463878, 'learning_rate': 0.0890915741234015, 'epoch': 0.24}
+ 24%|██▍ | 125/520 [07:58<25:05, 3.81s/it] {'loss': 1.2329, 'grad_norm': 0.0012766004880977201, 'learning_rate': 0.08889649541323574, 'epoch': 0.24}
+ 24%|██▍ | 126/520 [08:02<26:18, 4.01s/it] {'loss': 1.2053, 'grad_norm': 0.0010728080898922093, 'learning_rate': 0.08869990541331138, 'epoch': 0.24}
+ 24%|██▍ | 127/520 [08:06<25:37, 3.91s/it] {'loss': 1.2189, 'grad_norm': 0.0014601155570460177, 'learning_rate': 0.08850181176196315, 'epoch': 0.24}
+ 25%|██▍ | 128/520 [08:10<25:20, 3.88s/it] {'loss': 1.2549, 'grad_norm': 0.0012875887609717532, 'learning_rate': 0.0883022221559489, 'epoch': 0.25}
+ 25%|██▍ | 129/520 [08:13<25:01, 3.84s/it] {'loss': 1.2271, 'grad_norm': 0.0012101096402377092, 'learning_rate': 0.08810114435015054, 'epoch': 0.25}
+ 25%|██▌ | 130/520 [08:17<24:48, 3.82s/it] {'loss': 1.2454, 'grad_norm': 0.0011675523704351536, 'learning_rate': 0.08789858615727265, 'epoch': 0.25}
+ 25%|██▌ | 131/520 [08:21<24:47, 3.82s/it] {'loss': 1.1724, 'grad_norm': 0.0011189796114130306, 'learning_rate': 0.087694555447539, 'epoch': 0.25}
+ 25%|██▌ | 132/520 [08:25<24:24, 3.77s/it] {'loss': 1.2972, 'grad_norm': 0.001453765576573467, 'learning_rate': 0.08748906014838671, 'epoch': 0.25}
+ 26%|██▌ | 133/520 [08:28<24:11, 3.75s/it] {'loss': 1.2134, 'grad_norm': 0.001407781094804851, 'learning_rate': 0.08728210824415827, 'epoch': 0.26}
+ 26%|██▌ | 134/520 [08:32<24:08, 3.75s/it] {'loss': 1.2817, 'grad_norm': 0.001282588985067607, 'learning_rate': 0.08707370777579133, 'epoch': 0.26}
+ 26%|██▌ | 135/520 [08:36<23:54, 3.73s/it] {'loss': 1.3375, 'grad_norm': 0.0013864184267435194, 'learning_rate': 0.0868638668405062, 'epoch': 0.26}
+ 26%|██▌ | 136/520 [08:39<23:59, 3.75s/it] {'loss': 1.2943, 'grad_norm': 0.0013318865504277457, 'learning_rate': 0.08665259359149131, 'epoch': 0.26}
+ 26%|██▋ | 137/520 [08:43<24:03, 3.77s/it] {'loss': 1.208, 'grad_norm': 0.0014765648532901563, 'learning_rate': 0.08643989623758642, 'epoch': 0.26}
+ 27%|██▋ | 138/520 [08:47<23:43, 3.73s/it] {'loss': 1.2204, 'grad_norm': 0.0011892484088810288, 'learning_rate': 0.08622578304296363, 'epoch': 0.27}
+ 27%|██▋ | 139/520 [08:51<23:32, 3.71s/it] {'loss': 1.0863, 'grad_norm': 0.0011288442844000581, 'learning_rate': 0.08601026232680634, 'epoch': 0.27}
+ 27%|██▋ | 140/520 [08:54<23:25, 3.70s/it] {'loss': 1.2282, 'grad_norm': 0.0011388622245718182, 'learning_rate': 0.08579334246298592, 'epoch': 0.27}
+ 27%|██▋ | 141/520 [08:58<23:14, 3.68s/it] {'loss': 1.3159, 'grad_norm': 0.0011955653065177727, 'learning_rate': 0.08557503187973652, 'epoch': 0.27}
+ 27%|██▋ | 142/520 [09:02<23:28, 3.73s/it] {'loss': 1.2309, 'grad_norm': 0.0011319527859700578, 'learning_rate': 0.08535533905932738, 'epoch': 0.27}
+ 28%|██▊ | 143/520 [09:06<23:35, 3.75s/it] {'loss': 1.2438, 'grad_norm': 0.0013581593867752768, 'learning_rate': 0.08513427253773347, 'epoch': 0.28}
+ 28%|██▊ | 144/520 [09:09<23:32, 3.76s/it] {'loss': 1.2285, 'grad_norm': 0.0014073502278291997, 'learning_rate': 0.08491184090430365, 'epoch': 0.28}
| 144/520 [09:09<23:32, 3.76s/it] 28%|██▊ | 145/520 [09:13<23:22, 3.74s/it] {'loss': 1.1503, 'grad_norm': 0.0011978058017663542, 'learning_rate': 0.08468805280142709, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:13<23:22, 3.74s/it] 28%|██▊ | 146/520 [09:17<23:07, 3.71s/it] {'loss': 1.2821, 'grad_norm': 0.0012680708733154072, 'learning_rate': 0.08446291692419736, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:17<23:07, 3.71s/it] 28%|██▊ | 147/520 [09:20<22:55, 3.69s/it] {'loss': 1.1958, 'grad_norm': 0.0013284789332094168, 'learning_rate': 0.08423644202007469, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:20<22:55, 3.69s/it] 28%|██▊ | 148/520 [09:24<22:53, 3.69s/it] {'loss': 1.2188, 'grad_norm': 0.0012040526524123183, 'learning_rate': 0.08400863688854597, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:24<22:53, 3.69s/it] 29%|██▊ | 149/520 [09:28<22:41, 3.67s/it] {'loss': 1.1615, 'grad_norm': 0.0012496309176728585, 'learning_rate': 0.08377951038078302, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:28<22:41, 3.67s/it] 29%|██▉ | 150/520 [09:31<22:35, 3.66s/it] {'loss': 1.3746, 'grad_norm': 0.0012726519677792184, 'learning_rate': 0.08354907139929851, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:31<22:35, 3.66s/it] 29%|██▉ | 151/520 [09:35<22:34, 3.67s/it] {'loss': 1.2115, 'grad_norm': 0.0013293065132342493, 'learning_rate': 0.0833173288976002, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:35<22:34, 3.67s/it] 29%|██▉ | 152/520 [09:39<22:30, 3.67s/it] {'loss': 1.18, 'grad_norm': 0.0013259833320429534, 'learning_rate': 0.08308429187984298, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:39<22:30, 3.67s/it] 29%|██▉ | 153/520 [09:42<22:24, 3.66s/it] {'loss': 1.2094, 'grad_norm': 0.0012600393196890883, 'learning_rate': 0.08284996940047903, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:42<22:24, 3.66s/it] 30%|██▉ | 154/520 [09:46<22:21, 3.67s/it] {'loss': 1.2921, 'grad_norm': 0.001206864285482727, 'learning_rate': 0.08261437056390607, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:46<22:21, 3.67s/it] 30%|██▉ | 155/520 [09:50<22:16, 3.66s/it] {'loss': 1.208, 'grad_norm': 0.0013448986122933052, 'learning_rate': 0.08237750452411352, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:50<22:16, 3.66s/it] 30%|███ | 156/520 [09:53<22:21, 3.69s/it] {'loss': 1.2323, 'grad_norm': 0.0014096015214589016, 'learning_rate': 0.08213938048432697, 'epoch': 0.3} + 30%|███ | 156/520 [09:53<22:21, 3.69s/it] 30%|███ | 157/520 [09:57<22:16, 3.68s/it] {'loss': 1.2735, 'grad_norm': 0.001195596879366164, 'learning_rate': 0.08190000769665044, 'epoch': 0.3} + 30%|███ | 157/520 [09:57<22:16, 3.68s/it] 30%|███ | 158/520 [10:01<22:22, 3.71s/it] {'loss': 1.2164, 'grad_norm': 0.0012879571776152055, 'learning_rate': 0.081659395461707, 'epoch': 0.3} + 30%|███ | 158/520 [10:01<22:22, 3.71s/it] 31%|███ | 159/520 [10:05<22:34, 3.75s/it] {'loss': 1.2631, 'grad_norm': 0.0012878440100823365, 'learning_rate': 0.08141755312827736, 'epoch': 0.31} + 31%|███ | 159/520 [10:05<22:34, 3.75s/it] 31%|███ | 160/520 [10:08<22:38, 3.77s/it] {'loss': 1.2633, 'grad_norm': 0.0013097604200873018, 'learning_rate': 0.08117449009293669, 'epoch': 0.31} + 31%|███ | 160/520 [10:08<22:38, 3.77s/it] 31%|███ | 161/520 [10:12<22:48, 3.81s/it] {'loss': 1.241, 'grad_norm': 0.0012649780045238283, 'learning_rate': 0.08093021579968941, 'epoch': 0.31} + 31%|███ | 161/520 [10:12<22:48, 3.81s/it] 31%|███ | 162/520 [10:16<22:44, 3.81s/it] {'loss': 1.2164, 'grad_norm': 0.0011878837307378203, 'learning_rate': 0.08068473973960238, 'epoch': 0.31} + 31%|███ | 162/520 [10:16<22:44, 3.81s/it] 31%|███▏ | 163/520 [10:20<22:40, 3.81s/it] {'loss': 1.1485, 'grad_norm': 
0.0014345074420732454, 'learning_rate': 0.08043807145043604, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:20<22:40, 3.81s/it] 32%|███▏ | 164/520 [10:24<22:41, 3.82s/it] {'loss': 1.1059, 'grad_norm': 0.0012118894270846628, 'learning_rate': 0.08019022051627388, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:24<22:41, 3.82s/it] 32%|███▏ | 165/520 [10:28<22:36, 3.82s/it] {'loss': 1.2647, 'grad_norm': 0.001231636664749837, 'learning_rate': 0.07994119656715003, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:28<22:36, 3.82s/it] 32%|███▏ | 166/520 [10:31<22:31, 3.82s/it] {'loss': 1.2226, 'grad_norm': 0.0013851850855069148, 'learning_rate': 0.07969100927867508, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:31<22:31, 3.82s/it] 32%|███▏ | 167/520 [10:35<22:13, 3.78s/it] {'loss': 1.2202, 'grad_norm': 0.0012688176300093132, 'learning_rate': 0.07943966837166024, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:35<22:13, 3.78s/it] 32%|███▏ | 168/520 [10:39<21:57, 3.74s/it] {'loss': 1.1618, 'grad_norm': 0.0012332674437190395, 'learning_rate': 0.0791871836117395, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:39<21:57, 3.74s/it] 32%|███▎ | 169/520 [10:42<21:47, 3.72s/it] {'loss': 1.2347, 'grad_norm': 0.0012141670949385637, 'learning_rate': 0.0789335648089903, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:42<21:47, 3.72s/it] 33%|███▎ | 170/520 [10:46<21:37, 3.71s/it] {'loss': 1.1767, 'grad_norm': 0.0010577092033014165, 'learning_rate': 0.07867882181755231, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:46<21:37, 3.71s/it] 33%|███▎ | 171/520 [10:50<21:26, 3.69s/it] {'loss': 1.1766, 'grad_norm': 0.001352246731320339, 'learning_rate': 0.07842296453524462, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:50<21:26, 3.69s/it] 33%|███▎ | 172/520 [10:53<21:23, 3.69s/it] {'loss': 1.2495, 'grad_norm': 0.001261312814652003, 'learning_rate': 0.0781660029031811, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:53<21:23, 3.69s/it] 33%|███▎ | 173/520 [10:57<21:20, 3.69s/it] {'loss': 1.1824, 'grad_norm': 0.001252101221629372, 'learning_rate': 0.07790794690538422, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:57<21:20, 3.69s/it] 33%|███▎ | 174/520 [11:01<21:30, 3.73s/it] {'loss': 1.233, 'grad_norm': 0.0012810516002719596, 'learning_rate': 0.07764880656839697, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:01<21:30, 3.73s/it] 34%|███▎ | 175/520 [11:05<21:34, 3.75s/it] {'loss': 1.1534, 'grad_norm': 0.0011862694735883391, 'learning_rate': 0.07738859196089358, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:05<21:34, 3.75s/it] 34%|███▍ | 176/520 [11:09<21:39, 3.78s/it] {'loss': 1.2472, 'grad_norm': 0.0012421783337581997, 'learning_rate': 0.07712731319328797, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:09<21:39, 3.78s/it] 34%|███▍ | 177/520 [11:12<21:26, 3.75s/it] {'loss': 1.13, 'grad_norm': 0.00124972764391989, 'learning_rate': 0.0768649804173412, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:12<21:26, 3.75s/it] 34%|███▍ | 178/520 [11:16<21:11, 3.72s/it] {'loss': 1.2235, 'grad_norm': 0.001356245583263552, 'learning_rate': 0.07660160382576683, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:16<21:11, 3.72s/it] 34%|███▍ | 179/520 [11:20<20:57, 3.69s/it] {'loss': 1.295, 'grad_norm': 0.0012149036202491338, 'learning_rate': 0.07633719365183504, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:20<20:57, 3.69s/it] 35%|███▍ | 180/520 [11:23<20:46, 3.67s/it] {'loss': 1.2099, 'grad_norm': 0.001281760286637025, 'learning_rate': 0.0760717601689749, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:23<20:46, 3.67s/it] 35%|███▍ | 181/520 [11:27<20:40, 3.66s/it] {'loss': 1.1948, 'grad_norm': 0.0011166033309218705, 'learning_rate': 0.07580531369037534, 'epoch': 0.35} + 
35%|███▍ | 181/520 [11:27<20:40, 3.66s/it] 35%|███▌ | 182/520 [11:30<20:36, 3.66s/it] {'loss': 1.2067, 'grad_norm': 0.0013256755252534366, 'learning_rate': 0.0755378645685843, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:30<20:36, 3.66s/it] 35%|███▌ | 183/520 [11:34<20:34, 3.66s/it] {'loss': 1.2224, 'grad_norm': 0.0012262810860454647, 'learning_rate': 0.07526942319510654, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:34<20:34, 3.66s/it] 35%|███▌ | 184/520 [11:38<20:27, 3.65s/it] {'loss': 1.1648, 'grad_norm': 0.0013272443712565518, 'learning_rate': 0.07500000000000001, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:38<20:27, 3.65s/it] 36%|███▌ | 185/520 [11:41<20:26, 3.66s/it] {'loss': 1.2899, 'grad_norm': 0.0012331779815146794, 'learning_rate': 0.07472960545147038, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:41<20:26, 3.66s/it] 36%|███▌ | 186/520 [11:45<20:19, 3.65s/it] {'loss': 1.1877, 'grad_norm': 0.001305060728743968, 'learning_rate': 0.07445825005546447, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:45<20:19, 3.65s/it] 36%|███▌ | 187/520 [11:49<20:12, 3.64s/it] {'loss': 1.1825, 'grad_norm': 0.0014592553498532948, 'learning_rate': 0.07418594435526199, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:49<20:12, 3.64s/it] 36%|███▌ | 188/520 [11:52<20:11, 3.65s/it] {'loss': 1.2759, 'grad_norm': 0.001311337442402482, 'learning_rate': 0.07391269893106592, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:52<20:11, 3.65s/it] 36%|███▋ | 189/520 [11:56<20:06, 3.64s/it] {'loss': 1.2711, 'grad_norm': 0.0011656452770394572, 'learning_rate': 0.07363852439959136, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:56<20:06, 3.64s/it] 37%|███▋ | 190/520 [12:00<20:02, 3.64s/it] {'loss': 1.1968, 'grad_norm': 0.0013591654838865594, 'learning_rate': 0.0733634314136531, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:00<20:02, 3.64s/it] 37%|███▋ | 191/520 [12:03<19:59, 3.65s/it] {'loss': 1.1589, 'grad_norm': 0.0012259475749336237, 'learning_rate': 0.0730874306617517, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:03<19:59, 3.65s/it] 37%|███▋ | 192/520 [12:07<19:58, 3.65s/it] {'loss': 1.2355, 'grad_norm': 0.0012210648535689135, 'learning_rate': 0.07281053286765815, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:07<19:58, 3.65s/it] 37%|███▋ | 193/520 [12:11<19:56, 3.66s/it] {'loss': 1.1828, 'grad_norm': 0.0013372791285532425, 'learning_rate': 0.07253274878999727, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:11<19:56, 3.66s/it] 37%|███▋ | 194/520 [12:14<19:53, 3.66s/it] {'loss': 1.0787, 'grad_norm': 0.0011089322906780226, 'learning_rate': 0.07225408922182962, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:14<19:53, 3.66s/it] 38%|███▊ | 195/520 [12:18<19:44, 3.64s/it] {'loss': 1.2507, 'grad_norm': 0.0012380929384097183, 'learning_rate': 0.07197456499023225, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:18<19:44, 3.64s/it] 38%|███▊ | 196/520 [12:22<19:46, 3.66s/it] {'loss': 1.2372, 'grad_norm': 0.0014337578959909261, 'learning_rate': 0.07169418695587791, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:22<19:46, 3.66s/it] 38%|███▊ | 197/520 [12:25<19:50, 3.68s/it] {'loss': 1.1819, 'grad_norm': 0.0012915964159194238, 'learning_rate': 0.07141296601261314, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:25<19:50, 3.68s/it] 38%|███▊ | 198/520 [12:29<19:44, 3.68s/it] {'loss': 1.2463, 'grad_norm': 0.001349413511926681, 'learning_rate': 0.07113091308703498, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:29<19:44, 3.68s/it] 38%|███▊ | 199/520 [12:33<19:44, 3.69s/it] {'loss': 1.1641, 'grad_norm': 0.001281703702243751, 'learning_rate': 0.07084803913806641, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:33<19:44, 3.69s/it] 38%|███▊ | 200/520 
[12:36<19:41, 3.69s/it] {'loss': 1.1404, 'grad_norm': 0.0012365347776786737, 'learning_rate': 0.07056435515653059, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:36<19:41, 3.69s/it] 39%|███▊ | 201/520 [12:40<19:42, 3.71s/it] {'loss': 1.1578, 'grad_norm': 0.0011010601271146112, 'learning_rate': 0.07027987216472376, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:40<19:42, 3.71s/it] 39%|███▉ | 202/520 [12:44<19:33, 3.69s/it] {'loss': 1.1816, 'grad_norm': 0.0012799474204121901, 'learning_rate': 0.06999460121598704, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:44<19:33, 3.69s/it] 39%|███▉ | 203/520 [12:47<19:28, 3.69s/it] {'loss': 1.2197, 'grad_norm': 0.001289045960299798, 'learning_rate': 0.06970855339427698, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:47<19:28, 3.69s/it] 39%|███▉ | 204/520 [12:51<19:26, 3.69s/it] {'loss': 1.2339, 'grad_norm': 0.001305843833846277, 'learning_rate': 0.06942173981373474, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:51<19:26, 3.69s/it] 39%|███▉ | 205/520 [12:55<19:23, 3.69s/it] {'loss': 1.1562, 'grad_norm': 0.0011628238005111922, 'learning_rate': 0.0691341716182545, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:55<19:23, 3.69s/it] 40%|███▉ | 206/520 [12:59<19:20, 3.70s/it] {'loss': 1.2581, 'grad_norm': 0.0012165450508798026, 'learning_rate': 0.06884585998105026, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:59<19:20, 3.70s/it] 40%|███▉ | 207/520 [13:02<19:23, 3.72s/it] {'loss': 1.123, 'grad_norm': 0.0010703106695858005, 'learning_rate': 0.0685568161042219, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:02<19:23, 3.72s/it] 40%|████ | 208/520 [13:06<19:13, 3.70s/it] {'loss': 1.2605, 'grad_norm': 0.0014054424970315223, 'learning_rate': 0.06826705121831976, 'epoch': 0.4} + 40%|████ | 208/520 [13:06<19:13, 3.70s/it] 40%|████ | 209/520 [13:10<19:11, 3.70s/it] {'loss': 1.1762, 'grad_norm': 0.0011984945805590562, 'learning_rate': 0.06797657658190838, 'epoch': 0.4} + 40%|████ | 209/520 [13:10<19:11, 3.70s/it] 40%|████ | 210/520 [13:13<19:05, 3.69s/it] {'loss': 1.2411, 'grad_norm': 0.0012619832267679746, 'learning_rate': 0.06768540348112907, 'epoch': 0.4} + 40%|████ | 210/520 [13:13<19:05, 3.69s/it] 41%|████ | 211/520 [13:17<19:06, 3.71s/it] {'loss': 1.2405, 'grad_norm': 0.0011547477297094767, 'learning_rate': 0.06739354322926136, 'epoch': 0.41} + 41%|████ | 211/520 [13:17<19:06, 3.71s/it] 41%|████ | 212/520 [13:21<19:08, 3.73s/it] {'loss': 1.2431, 'grad_norm': 0.0012233263786085272, 'learning_rate': 0.06710100716628345, 'epoch': 0.41} + 41%|████ | 212/520 [13:21<19:08, 3.73s/it] 41%|████ | 213/520 [13:25<19:03, 3.73s/it] {'loss': 1.1923, 'grad_norm': 0.0014097671672276288, 'learning_rate': 0.06680780665843154, 'epoch': 0.41} + 41%|████ | 213/520 [13:25<19:03, 3.73s/it] 41%|████ | 214/520 [13:28<18:51, 3.70s/it] {'loss': 1.1877, 'grad_norm': 0.0012769274935290724, 'learning_rate': 0.06651395309775836, 'epoch': 0.41} + 41%|████ | 214/520 [13:28<18:51, 3.70s/it] 41%|████▏ | 215/520 [13:32<18:53, 3.72s/it] {'loss': 1.0959, 'grad_norm': 0.0011733570508027824, 'learning_rate': 0.06621945790169036, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:32<18:53, 3.72s/it] 42%|████▏ | 216/520 [13:36<18:51, 3.72s/it] {'loss': 1.1074, 'grad_norm': 0.0012372978145977003, 'learning_rate': 0.06592433251258423, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:36<18:51, 3.72s/it] 42%|████▏ | 217/520 [13:39<18:44, 3.71s/it] {'loss': 1.2384, 'grad_norm': 0.001310053901213507, 'learning_rate': 0.06562858839728224, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:39<18:44, 3.71s/it] 42%|████▏ | 218/520 [13:43<18:33, 3.69s/it] {'loss': 1.2112, 'grad_norm': 
0.0014260689443619829, 'learning_rate': 0.06533223704666673, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:43<18:33, 3.69s/it] 42%|████▏ | 219/520 [13:47<18:29, 3.69s/it] {'loss': 1.2313, 'grad_norm': 0.001192680941371589, 'learning_rate': 0.06503528997521366, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:47<18:29, 3.69s/it] 42%|████▏ | 220/520 [13:50<18:23, 3.68s/it] {'loss': 1.1422, 'grad_norm': 0.0011480817279115878, 'learning_rate': 0.06473775872054521, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:50<18:23, 3.68s/it] 42%|████▎ | 221/520 [13:54<18:27, 3.70s/it] {'loss': 1.2216, 'grad_norm': 0.001257367734653576, 'learning_rate': 0.0644396548429815, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:54<18:27, 3.70s/it] 43%|████▎ | 222/520 [13:58<18:15, 3.68s/it] {'loss': 1.1682, 'grad_norm': 0.0012480338238774053, 'learning_rate': 0.06414098992509137, 'epoch': 0.43} + 43%|████▎ | 222/520 [13:58<18:15, 3.68s/it] 43%|████▎ | 223/520 [14:01<18:09, 3.67s/it] {'loss': 1.165, 'grad_norm': 0.0012545046505765725, 'learning_rate': 0.06384177557124247, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:01<18:09, 3.67s/it] 43%|████▎ | 224/520 [14:05<18:13, 3.70s/it] {'loss': 1.1885, 'grad_norm': 0.0010940162105780424, 'learning_rate': 0.06354202340715026, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:05<18:13, 3.70s/it] 43%|████▎ | 225/520 [14:09<18:08, 3.69s/it] {'loss': 1.1637, 'grad_norm': 0.0012819876087563965, 'learning_rate': 0.06324174507942636, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:09<18:08, 3.69s/it] 43%|████▎ | 226/520 [14:13<18:04, 3.69s/it] {'loss': 1.2633, 'grad_norm': 0.0012307745989941436, 'learning_rate': 0.06294095225512604, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:13<18:04, 3.69s/it] 44%|████▎ | 227/520 [14:16<17:59, 3.68s/it] {'loss': 1.247, 'grad_norm': 0.0012081753682415266, 'learning_rate': 0.06263965662129488, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:16<17:59, 3.68s/it] 44%|████▍ | 228/520 [14:20<17:58, 3.69s/it] {'loss': 1.2324, 'grad_norm': 0.0012221433186511935, 'learning_rate': 0.062337869884514674, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:20<17:58, 3.69s/it] 44%|████▍ | 229/520 [14:24<17:56, 3.70s/it] {'loss': 1.2207, 'grad_norm': 0.0011897897400887268, 'learning_rate': 0.06203560377044866, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:24<17:56, 3.70s/it] 44%|████▍ | 230/520 [14:27<18:00, 3.73s/it] {'loss': 1.1148, 'grad_norm': 0.0011774119951479013, 'learning_rate': 0.06173287002338577, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:27<18:00, 3.73s/it] 44%|████▍ | 231/520 [14:31<17:55, 3.72s/it] {'loss': 1.1826, 'grad_norm': 0.0011773007227078845, 'learning_rate': 0.06142968040578448, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:31<17:55, 3.72s/it] 45%|████▍ | 232/520 [14:35<17:46, 3.70s/it] {'loss': 1.2695, 'grad_norm': 0.001236120232254859, 'learning_rate': 0.06112604669781572, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:35<17:46, 3.70s/it] 45%|████▍ | 233/520 [14:39<17:41, 3.70s/it] {'loss': 1.1637, 'grad_norm': 0.0012906892369673089, 'learning_rate': 0.06082198069690514, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:39<17:41, 3.70s/it] 45%|████▌ | 234/520 [14:42<17:41, 3.71s/it] {'loss': 1.1414, 'grad_norm': 0.0013118111352577262, 'learning_rate': 0.06051749421727479, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:42<17:41, 3.71s/it] 45%|████▌ | 235/520 [14:46<17:34, 3.70s/it] {'loss': 1.1878, 'grad_norm': 0.0012769634656098455, 'learning_rate': 0.06021259908948402, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:46<17:34, 3.70s/it] 45%|████▌ | 236/520 [14:50<17:29, 3.70s/it] {'loss': 1.2501, 'grad_norm': 0.0013764886431994958, 
'learning_rate': 0.059907307159969884, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:50<17:29, 3.70s/it] 46%|████▌ | 237/520 [14:53<17:26, 3.70s/it] {'loss': 1.2567, 'grad_norm': 0.0012313780631070259, 'learning_rate': 0.05960163029058682, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:53<17:26, 3.70s/it] 46%|████▌ | 238/520 [14:57<17:24, 3.70s/it] {'loss': 1.1903, 'grad_norm': 0.0012867188879384678, 'learning_rate': 0.05929558035814574, 'epoch': 0.46} + 46%|████▌ | 238/520 [14:57<17:24, 3.70s/it] 46%|████▌ | 239/520 [15:01<17:23, 3.72s/it] {'loss': 1.2537, 'grad_norm': 0.0012801389545301896, 'learning_rate': 0.05898916925395264, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:01<17:23, 3.72s/it] 46%|████▌ | 240/520 [15:04<17:17, 3.71s/it] {'loss': 1.086, 'grad_norm': 0.0011547754194053635, 'learning_rate': 0.058682408883346526, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:04<17:17, 3.71s/it] 46%|████▋ | 241/520 [15:08<17:10, 3.69s/it] {'loss': 1.1651, 'grad_norm': 0.0012383872611409388, 'learning_rate': 0.05837531116523682, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:08<17:10, 3.69s/it] 47%|████▋ | 242/520 [15:12<17:12, 3.71s/it] {'loss': 1.1723, 'grad_norm': 0.0011860116095082803, 'learning_rate': 0.05806788803164034, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:12<17:12, 3.71s/it] 47%|████▋ | 243/520 [15:16<17:05, 3.70s/it] {'loss': 1.1682, 'grad_norm': 0.0012469851192335592, 'learning_rate': 0.057760151427217576, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:16<17:05, 3.70s/it] 47%|████▋ | 244/520 [15:19<17:02, 3.70s/it] {'loss': 1.2656, 'grad_norm': 0.0012279432596319225, 'learning_rate': 0.05745211330880872, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:19<17:02, 3.70s/it] 47%|████▋ | 245/520 [15:23<17:00, 3.71s/it] {'loss': 1.1518, 'grad_norm': 0.001300381164025557, 'learning_rate': 0.057143785644969004, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:23<17:00, 3.71s/it] 47%|████▋ | 246/520 [15:27<16:56, 3.71s/it] {'loss': 1.2659, 'grad_norm': 0.001252055345872764, 'learning_rate': 0.05683518041550367, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:27<16:56, 3.71s/it] 48%|████▊ | 247/520 [15:30<16:55, 3.72s/it] {'loss': 1.3231, 'grad_norm': 0.0012939508390689702, 'learning_rate': 0.05652630961100259, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:30<16:55, 3.72s/it] 48%|████▊ | 248/520 [15:34<16:45, 3.70s/it] {'loss': 1.1456, 'grad_norm': 0.0012869934847506766, 'learning_rate': 0.05621718523237427, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:34<16:45, 3.70s/it] 48%|████▊ | 249/520 [15:38<16:39, 3.69s/it] {'loss': 1.2356, 'grad_norm': 0.001266600288256675, 'learning_rate': 0.05590781929037965, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:38<16:39, 3.69s/it] 48%|████▊ | 250/520 [15:41<16:38, 3.70s/it] {'loss': 1.1855, 'grad_norm': 0.0013120204315117982, 'learning_rate': 0.055598223805165395, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:41<16:38, 3.70s/it] 48%|████▊ | 251/520 [15:45<16:31, 3.69s/it] {'loss': 1.2478, 'grad_norm': 0.0011845191106516543, 'learning_rate': 0.0552884108057969, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:45<16:31, 3.69s/it] 48%|████▊ | 252/520 [15:49<16:25, 3.68s/it] {'loss': 1.1693, 'grad_norm': 0.0011566596965324166, 'learning_rate': 0.05497839232979084, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:49<16:25, 3.68s/it] 49%|████▊ | 253/520 [15:53<16:50, 3.78s/it] {'loss': 1.2294, 'grad_norm': 0.0013375676797591257, 'learning_rate': 0.05466818042264753, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:53<16:50, 3.78s/it] 49%|████▉ | 254/520 [15:57<17:11, 3.88s/it] {'loss': 1.176, 'grad_norm': 0.001193848512867573, 'learning_rate': 
0.05435778713738292, 'epoch': 0.49} + 49%|████▉ | 254/520 [15:57<17:11, 3.88s/it] 49%|████▉ | 255/520 [16:01<17:24, 3.94s/it] {'loss': 1.1777, 'grad_norm': 0.0013063982489113807, 'learning_rate': 0.05404722453406017, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:01<17:24, 3.94s/it] 49%|████▉ | 256/520 [16:05<17:30, 3.98s/it] {'loss': 1.235, 'grad_norm': 0.0013114133470307947, 'learning_rate': 0.05373650467932122, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:05<17:30, 3.98s/it] 49%|████▉ | 257/520 [16:09<17:34, 4.01s/it] {'loss': 1.2153, 'grad_norm': 0.0012636502246732263, 'learning_rate': 0.05342563964591784, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:09<17:34, 4.01s/it] 50%|████▉ | 258/520 [16:13<17:25, 3.99s/it] {'loss': 1.2202, 'grad_norm': 0.0011026572208290763, 'learning_rate': 0.053114641512242614, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:13<17:25, 3.99s/it] 50%|████▉ | 259/520 [16:17<17:15, 3.97s/it] {'loss': 1.2933, 'grad_norm': 0.0013934853619247025, 'learning_rate': 0.05280352236185959, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:17<17:15, 3.97s/it] 50%|█████ | 260/520 [16:21<17:11, 3.97s/it] {'loss': 1.2133, 'grad_norm': 0.0009918013649924367, 'learning_rate': 0.05249229428303486, 'epoch': 0.5} + 50%|█████ | 260/520 [16:21<17:11, 3.97s/it] 50%|█████ | 261/520 [16:25<17:01, 3.95s/it] {'loss': 1.1608, 'grad_norm': 0.0012071972130859583, 'learning_rate': 0.05218096936826681, 'epoch': 0.5} + 50%|█████ | 261/520 [16:25<17:01, 3.95s/it] 50%|█████ | 262/520 [16:29<16:58, 3.95s/it] {'loss': 1.1647, 'grad_norm': 0.001266912250772581, 'learning_rate': 0.05186955971381629, 'epoch': 0.5} + 50%|█████ | 262/520 [16:29<16:58, 3.95s/it] 51%|█████ | 263/520 [16:33<16:52, 3.94s/it] {'loss': 1.1861, 'grad_norm': 0.0012081064559699203, 'learning_rate': 0.05155807741923666, 'epoch': 0.51} + 51%|█████ | 263/520 [16:33<16:52, 3.94s/it] 51%|█████ | 264/520 [16:37<16:48, 3.94s/it] {'loss': 1.2452, 'grad_norm': 0.0011897166681048277, 'learning_rate': 0.05124653458690365, 'epoch': 0.51} + 51%|█████ | 264/520 [16:37<16:48, 3.94s/it] 51%|█████ | 265/520 [16:41<16:43, 3.94s/it] {'loss': 1.1719, 'grad_norm': 0.0013894276751826617, 'learning_rate': 0.05093494332154511, 'epoch': 0.51} + 51%|█████ | 265/520 [16:41<16:43, 3.94s/it] 51%|█████ | 266/520 [16:44<16:21, 3.86s/it] {'loss': 1.0561, 'grad_norm': 0.0011025389159007976, 'learning_rate': 0.05062331572977076, 'epoch': 0.51} + 51%|█████ | 266/520 [16:44<16:21, 3.86s/it] 51%|█████▏ | 267/520 [16:48<16:00, 3.80s/it] {'loss': 1.1693, 'grad_norm': 0.0011991261330721753, 'learning_rate': 0.05031166391960168, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:48<16:00, 3.80s/it] 52%|█████▏ | 268/520 [16:52<15:54, 3.79s/it] {'loss': 1.2812, 'grad_norm': 0.0012080929811762078, 'learning_rate': 0.05, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:52<15:54, 3.79s/it] 52%|█████▏ | 269/520 [16:55<15:38, 3.74s/it] {'loss': 1.2705, 'grad_norm': 0.0013379166532390796, 'learning_rate': 0.049688336080398326, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:55<15:38, 3.74s/it] 52%|█████▏ | 270/520 [16:59<15:33, 3.73s/it] {'loss': 1.1278, 'grad_norm': 0.0012238429656283218, 'learning_rate': 0.04937668427022925, 'epoch': 0.52} + 52%|█████▏ | 270/520 [16:59<15:33, 3.73s/it] 52%|█████▏ | 271/520 [17:03<15:28, 3.73s/it] {'loss': 1.2461, 'grad_norm': 0.0012925770777254422, 'learning_rate': 0.0490650566784549, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:03<15:28, 3.73s/it] 52%|█████▏ | 272/520 [17:07<15:23, 3.73s/it] {'loss': 1.1405, 'grad_norm': 0.0011846196030955512, 'learning_rate': 0.048753465413096365, 'epoch': 0.52} 
+ 52%|█████▏ | 272/520 [17:07<15:23, 3.73s/it] 52%|█████▎ | 273/520 [17:10<15:19, 3.72s/it] {'loss': 1.2395, 'grad_norm': 0.0011804148037630156, 'learning_rate': 0.04844192258076335, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:10<15:19, 3.72s/it] 53%|█████▎ | 274/520 [17:14<15:12, 3.71s/it] {'loss': 1.2378, 'grad_norm': 0.0013246568047009604, 'learning_rate': 0.048130440286183726, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:14<15:12, 3.71s/it] 53%|█████▎ | 275/520 [17:18<15:08, 3.71s/it] {'loss': 1.1771, 'grad_norm': 0.0012472217837821578, 'learning_rate': 0.047819030631733206, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:18<15:08, 3.71s/it] 53%|█████▎ | 276/520 [17:21<15:03, 3.70s/it] {'loss': 1.2335, 'grad_norm': 0.0013394726066601132, 'learning_rate': 0.04750770571696514, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:21<15:03, 3.70s/it] 53%|█████▎ | 277/520 [17:25<15:00, 3.71s/it] {'loss': 1.255, 'grad_norm': 0.0011340165144957902, 'learning_rate': 0.04719647763814041, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:25<15:00, 3.71s/it] 53%|█████▎ | 278/520 [17:29<14:55, 3.70s/it] {'loss': 1.1396, 'grad_norm': 0.0011352671751403939, 'learning_rate': 0.0468853584877574, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:29<14:55, 3.70s/it] 54%|█████▎ | 279/520 [17:32<14:54, 3.71s/it] {'loss': 1.1306, 'grad_norm': 0.0012666543883720135, 'learning_rate': 0.04657436035408217, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:32<14:54, 3.71s/it] 54%|█████▍ | 280/520 [17:36<14:47, 3.70s/it] {'loss': 1.1678, 'grad_norm': 0.0013426620706720235, 'learning_rate': 0.04626349532067879, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:36<14:47, 3.70s/it] 54%|█████▍ | 281/520 [17:40<14:44, 3.70s/it] {'loss': 1.2692, 'grad_norm': 0.0013051197074133057, 'learning_rate': 0.04595277546593984, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:40<14:44, 3.70s/it] 54%|█████▍ | 282/520 [17:43<14:38, 3.69s/it] {'loss': 1.1501, 'grad_norm': 0.001189053376436797, 'learning_rate': 0.04564221286261709, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:43<14:38, 3.69s/it] 54%|█████▍ | 283/520 [17:47<14:36, 3.70s/it] {'loss': 1.281, 'grad_norm': 0.0013492173607884614, 'learning_rate': 0.045331819577352474, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:47<14:36, 3.70s/it] 55%|█████▍ | 284/520 [17:51<14:29, 3.69s/it] {'loss': 1.1402, 'grad_norm': 0.0012950345609338536, 'learning_rate': 0.04502160767020918, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:51<14:29, 3.69s/it] 55%|█████▍ | 285/520 [17:55<14:23, 3.67s/it] {'loss': 1.1726, 'grad_norm': 0.0012416455220675764, 'learning_rate': 0.04471158919420312, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:55<14:23, 3.67s/it] 55%|█████▌ | 286/520 [17:58<14:18, 3.67s/it] {'loss': 1.0581, 'grad_norm': 0.0012584000553967339, 'learning_rate': 0.04440177619483461, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:58<14:18, 3.67s/it] 55%|█████▌ | 287/520 [18:02<14:25, 3.72s/it] {'loss': 1.283, 'grad_norm': 0.0012319129457077785, 'learning_rate': 0.044092180709620364, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:02<14:25, 3.72s/it] 55%|█████▌ | 288/520 [18:06<14:26, 3.73s/it] {'loss': 1.2994, 'grad_norm': 0.001155616196219598, 'learning_rate': 0.04378281476762576, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:06<14:26, 3.73s/it] 56%|█████▌ | 289/520 [18:10<14:24, 3.74s/it] {'loss': 1.1938, 'grad_norm': 0.00119659880024051, 'learning_rate': 0.043473690388997434, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:10<14:24, 3.74s/it] 56%|█████▌ | 290/520 [18:13<14:20, 3.74s/it] {'loss': 1.1152, 'grad_norm': 0.001138252852516774, 'learning_rate': 0.04316481958449634, 'epoch': 
0.56} + 56%|█████▌ | 290/520 [18:13<14:20, 3.74s/it] 56%|█████▌ | 291/520 [18:17<14:10, 3.71s/it] {'loss': 1.1504, 'grad_norm': 0.0012246752501853377, 'learning_rate': 0.04285621435503101, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:17<14:10, 3.71s/it] 56%|█████▌ | 292/520 [18:21<14:09, 3.72s/it] {'loss': 1.2097, 'grad_norm': 0.0012368902763953851, 'learning_rate': 0.04254788669119128, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:21<14:09, 3.72s/it] 56%|█████▋ | 293/520 [18:24<14:00, 3.70s/it] {'loss': 1.1595, 'grad_norm': 0.00129136042447514, 'learning_rate': 0.04223984857278242, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:24<14:00, 3.70s/it] 57%|█████▋ | 294/520 [18:28<14:00, 3.72s/it] {'loss': 1.1789, 'grad_norm': 0.0013717767095303996, 'learning_rate': 0.041932111968359664, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:28<14:00, 3.72s/it] 57%|█████▋ | 295/520 [18:32<13:54, 3.71s/it] {'loss': 1.1671, 'grad_norm': 0.0011099375234044201, 'learning_rate': 0.04162468883476319, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:32<13:54, 3.71s/it] 57%|█████▋ | 296/520 [18:35<13:47, 3.69s/it] {'loss': 1.1305, 'grad_norm': 0.0012720563645840025, 'learning_rate': 0.041317591116653486, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:35<13:47, 3.69s/it] 57%|█████▋ | 297/520 [18:39<13:52, 3.73s/it] {'loss': 1.2575, 'grad_norm': 0.0013299087204112936, 'learning_rate': 0.04101083074604737, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:39<13:52, 3.73s/it] 57%|█████▋ | 298/520 [18:43<13:57, 3.77s/it] {'loss': 1.2202, 'grad_norm': 0.0011680993819028099, 'learning_rate': 0.04070441964185428, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:43<13:57, 3.77s/it] 57%|█████▊ | 299/520 [18:47<13:57, 3.79s/it] {'loss': 1.2215, 'grad_norm': 0.0011797056831329897, 'learning_rate': 0.0403983697094132, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:47<13:57, 3.79s/it] 58%|█████▊ | 300/520 [18:51<13:55, 3.80s/it] {'loss': 1.2667, 'grad_norm': 0.0012366396081811995, 'learning_rate': 0.040092692840030135, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:51<13:55, 3.80s/it] 58%|█████▊ | 301/520 [18:55<13:58, 3.83s/it] {'loss': 1.2596, 'grad_norm': 0.0012608995070080213, 'learning_rate': 0.039787400910515996, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:55<13:58, 3.83s/it] 58%|█████▊ | 302/520 [18:59<13:57, 3.84s/it] {'loss': 1.2218, 'grad_norm': 0.0011577967069197569, 'learning_rate': 0.03948250578272522, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:59<13:57, 3.84s/it] 58%|█████▊ | 303/520 [19:02<14:02, 3.88s/it] {'loss': 1.176, 'grad_norm': 0.0013568442240423269, 'learning_rate': 0.03917801930309486, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:03<14:02, 3.88s/it] 58%|█████▊ | 304/520 [19:06<13:52, 3.85s/it] {'loss': 1.1378, 'grad_norm': 0.0012468455960736634, 'learning_rate': 0.03887395330218429, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:06<13:52, 3.85s/it] 59%|█████▊ | 305/520 [19:10<13:34, 3.79s/it] {'loss': 1.2771, 'grad_norm': 0.0014303954714048254, 'learning_rate': 0.03857031959421553, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:10<13:34, 3.79s/it] 59%|█████▉ | 306/520 [19:14<13:22, 3.75s/it] {'loss': 1.2254, 'grad_norm': 0.0012805004305572723, 'learning_rate': 0.03826712997661425, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:14<13:22, 3.75s/it] 59%|█████▉ | 307/520 [19:17<13:09, 3.71s/it] {'loss': 1.1691, 'grad_norm': 0.0012048313821589702, 'learning_rate': 0.03796439622955136, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:17<13:09, 3.71s/it] 59%|█████▉ | 308/520 [19:21<13:00, 3.68s/it] {'loss': 1.2821, 'grad_norm': 0.0011873231222827144, 'learning_rate': 0.03766213011548532, 
'epoch': 0.59} + 59%|█████▉ | 308/520 [19:21<13:00, 3.68s/it] 59%|█████▉ | 309/520 [19:25<13:33, 3.85s/it] {'loss': 1.176, 'grad_norm': 0.0012209397533598366, 'learning_rate': 0.03736034337870512, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:25<13:33, 3.85s/it] 60%|█████▉ | 310/520 [19:29<13:15, 3.79s/it] {'loss': 1.1495, 'grad_norm': 0.0012446257277810117, 'learning_rate': 0.03705904774487396, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:29<13:15, 3.79s/it] 60%|█████▉ | 311/520 [19:32<13:05, 3.76s/it] {'loss': 1.1375, 'grad_norm': 0.0012066421803520171, 'learning_rate': 0.036758254920573635, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:32<13:05, 3.76s/it] 60%|██████ | 312/520 [19:37<13:24, 3.87s/it] {'loss': 1.1262, 'grad_norm': 0.0012893055398824455, 'learning_rate': 0.03645797659284975, 'epoch': 0.6} + 60%|██████ | 312/520 [19:37<13:24, 3.87s/it] 60%|██████ | 313/520 [19:40<13:12, 3.83s/it] {'loss': 1.1013, 'grad_norm': 0.001118311697735234, 'learning_rate': 0.03615822442875754, 'epoch': 0.6} + 60%|██████ | 313/520 [19:40<13:12, 3.83s/it] 60%|██████ | 314/520 [19:44<13:18, 3.87s/it] {'loss': 1.1431, 'grad_norm': 0.0012060661323617702, 'learning_rate': 0.035859010074908625, 'epoch': 0.6} + 60%|██████ | 314/520 [19:44<13:18, 3.87s/it] 61%|██████ | 315/520 [19:48<13:01, 3.81s/it] {'loss': 1.185, 'grad_norm': 0.001328731068914362, 'learning_rate': 0.035560345157018516, 'epoch': 0.61} + 61%|██████ | 315/520 [19:48<13:01, 3.81s/it] 61%|██████ | 316/520 [19:52<13:13, 3.89s/it] {'loss': 1.1312, 'grad_norm': 0.0012778703624041633, 'learning_rate': 0.035262241279454785, 'epoch': 0.61} + 61%|██████ | 316/520 [19:52<13:13, 3.89s/it] 61%|██████ | 317/520 [19:56<12:55, 3.82s/it] {'loss': 1.1319, 'grad_norm': 0.0011065509377427638, 'learning_rate': 0.03496471002478635, 'epoch': 0.61} + 61%|██████ | 317/520 [19:56<12:55, 3.82s/it] 61%|██████ | 318/520 [19:59<12:44, 3.78s/it] {'loss': 1.2409, 'grad_norm': 0.0013059692215002267, 'learning_rate': 0.03466776295333329, 'epoch': 0.61} + 61%|██████ | 318/520 [19:59<12:44, 3.78s/it] 61%|██████▏ | 319/520 [20:03<12:56, 3.86s/it] {'loss': 1.1273, 'grad_norm': 0.0011109810902007228, 'learning_rate': 0.03437141160271778, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:03<12:56, 3.86s/it] 62%|██████▏ | 320/520 [20:07<12:40, 3.80s/it] {'loss': 1.0713, 'grad_norm': 0.0012142660843423897, 'learning_rate': 0.034075667487415785, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:07<12:40, 3.80s/it] 62%|██████▏ | 321/520 [20:11<12:30, 3.77s/it] {'loss': 1.2644, 'grad_norm': 0.0012050944381069312, 'learning_rate': 0.033780542098309656, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:11<12:30, 3.77s/it] 62%|██████▏ | 322/520 [20:15<12:27, 3.77s/it] {'loss': 1.0829, 'grad_norm': 0.0011619815413035732, 'learning_rate': 0.03348604690224166, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:15<12:27, 3.77s/it] 62%|██████▏ | 323/520 [20:18<12:27, 3.79s/it] {'loss': 1.1564, 'grad_norm': 0.0012593543357233227, 'learning_rate': 0.03319219334156847, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:18<12:27, 3.79s/it] 62%|██████▏ | 324/520 [20:22<12:27, 3.81s/it] {'loss': 1.2106, 'grad_norm': 0.00126312851139057, 'learning_rate': 0.03289899283371657, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:22<12:27, 3.81s/it] 62%|██████▎ | 325/520 [20:26<12:24, 3.82s/it] {'loss': 1.2057, 'grad_norm': 0.001305825403426912, 'learning_rate': 0.03260645677073864, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:26<12:24, 3.82s/it] 63%|██████▎ | 326/520 [20:30<12:21, 3.82s/it] {'loss': 1.2035, 'grad_norm': 0.0013341859886185008, 'learning_rate': 
0.03231459651887093, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:30<12:21, 3.82s/it] 63%|██████▎ | 327/520 [20:34<12:11, 3.79s/it] {'loss': 1.1875, 'grad_norm': 0.0012562943708353661, 'learning_rate': 0.032023423418091626, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:34<12:11, 3.79s/it] 63%|██████▎ | 328/520 [20:37<11:59, 3.75s/it] {'loss': 1.2456, 'grad_norm': 0.001279446380661004, 'learning_rate': 0.03173294878168025, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:37<11:59, 3.75s/it] 63%|██████▎ | 329/520 [20:41<11:51, 3.72s/it] {'loss': 1.1284, 'grad_norm': 0.0011127309746907545, 'learning_rate': 0.031443183895778104, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:41<11:51, 3.72s/it] 63%|██████▎ | 330/520 [20:45<11:44, 3.71s/it] {'loss': 1.2059, 'grad_norm': 0.0011572856606998274, 'learning_rate': 0.03115414001894974, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:45<11:44, 3.71s/it] 64%|██████▎ | 331/520 [20:48<11:37, 3.69s/it] {'loss': 1.1636, 'grad_norm': 0.0012932387580497859, 'learning_rate': 0.030865828381745515, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:48<11:37, 3.69s/it] 64%|██████▍ | 332/520 [20:52<11:34, 3.69s/it] {'loss': 1.2157, 'grad_norm': 0.0011234524542385635, 'learning_rate': 0.030578260186265267, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:52<11:34, 3.69s/it] 64%|██████▍ | 333/520 [20:56<11:27, 3.68s/it] {'loss': 1.299, 'grad_norm': 0.001323975508850516, 'learning_rate': 0.03029144660572304, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:56<11:27, 3.68s/it] 64%|██████▍ | 334/520 [20:59<11:22, 3.67s/it] {'loss': 1.2091, 'grad_norm': 0.0013256118125069224, 'learning_rate': 0.03000539878401296, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:59<11:22, 3.67s/it] 64%|██████▍ | 335/520 [21:03<11:19, 3.67s/it] {'loss': 1.2094, 'grad_norm': 0.0012027427202264877, 'learning_rate': 0.029720127835276257, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:03<11:19, 3.67s/it] 65%|██████▍ | 336/520 [21:07<11:18, 3.69s/it] {'loss': 1.1197, 'grad_norm': 0.001347202462501851, 'learning_rate': 0.029435644843469434, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:07<11:18, 3.69s/it] 65%|██████▍ | 337/520 [21:10<11:15, 3.69s/it] {'loss': 1.1098, 'grad_norm': 0.0012496340138416022, 'learning_rate': 0.029151960861933612, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:10<11:15, 3.69s/it] 65%|██████▌ | 338/520 [21:14<11:11, 3.69s/it] {'loss': 1.2168, 'grad_norm': 0.0012551474996226647, 'learning_rate': 0.02886908691296504, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:14<11:11, 3.69s/it] 65%|██████▌ | 339/520 [21:18<11:07, 3.69s/it] {'loss': 1.161, 'grad_norm': 0.0012790556558764908, 'learning_rate': 0.028587033987386858, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:18<11:07, 3.69s/it] 65%|██████▌ | 340/520 [21:21<11:08, 3.71s/it] {'loss': 1.1456, 'grad_norm': 0.0011955636870952429, 'learning_rate': 0.028305813044122097, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:21<11:08, 3.71s/it] 66%|██████▌ | 341/520 [21:25<11:05, 3.72s/it] {'loss': 1.1789, 'grad_norm': 0.0013233111804297744, 'learning_rate': 0.028025435009767747, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:25<11:05, 3.72s/it] 66%|██████▌ | 342/520 [21:29<10:59, 3.70s/it] {'loss': 1.1944, 'grad_norm': 0.001420522110635791, 'learning_rate': 0.02774591077817038, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:29<10:59, 3.70s/it] 66%|██████▌ | 343/520 [21:33<10:56, 3.71s/it] {'loss': 1.1432, 'grad_norm': 0.0010140514259414212, 'learning_rate': 0.027467251210002732, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:33<10:56, 3.71s/it] 66%|██████▌ | 344/520 [21:36<10:51, 3.70s/it] {'loss': 1.1346, 
'grad_norm': 0.001133480846128711, 'learning_rate': 0.02718946713234185, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:36<10:51, 3.70s/it] 66%|██████▋ | 345/520 [21:40<10:44, 3.68s/it] {'loss': 1.2314, 'grad_norm': 0.0012549680359476242, 'learning_rate': 0.026912569338248316, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:40<10:44, 3.68s/it] 67%|██████▋ | 346/520 [21:44<10:41, 3.69s/it] {'loss': 1.1616, 'grad_norm': 0.001195555214562748, 'learning_rate': 0.0266365685863469, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:44<10:41, 3.69s/it] 67%|██████▋ | 347/520 [21:47<10:34, 3.67s/it] {'loss': 1.1526, 'grad_norm': 0.001130342934866173, 'learning_rate': 0.02636147560040866, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:47<10:34, 3.67s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:51<10:31, 3.67s/it] {'loss': 1.1118, 'grad_norm': 0.0014505069562239885, 'learning_rate': 0.026087301068934105, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:51<10:31, 3.67s/it] 67%|██████▋ | 349/520 [21:55<10:26, 3.67s/it] {'loss': 1.1441, 'grad_norm': 0.0012440411904459327, 'learning_rate': 0.025814055644738012, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:55<10:26, 3.67s/it] 67%|██████▋ | 350/520 [21:58<10:23, 3.66s/it] {'loss': 1.1894, 'grad_norm': 0.0012809645255183423, 'learning_rate': 0.025541749944535553, 'epoch': 0.67} + 67%|██████▋ | 350/520 [21:58<10:23, 3.66s/it] 68%|██████▊ | 351/520 [22:02<10:19, 3.67s/it] {'loss': 1.1011, 'grad_norm': 0.00117370673270271, 'learning_rate': 0.02527039454852963, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:02<10:19, 3.67s/it] 68%|██████▊ | 352/520 [22:06<10:13, 3.65s/it] {'loss': 1.2129, 'grad_norm': 0.0011599699900612856, 'learning_rate': 0.025000000000000012, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:06<10:13, 3.65s/it] 68%|██████▊ | 353/520 [22:09<10:11, 3.66s/it] {'loss': 1.1371, 'grad_norm': 0.0010173127923462353, 'learning_rate': 0.02473057680489348, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:09<10:11, 3.66s/it] 68%|██████▊ | 354/520 [22:13<10:09, 3.67s/it] {'loss': 1.2279, 'grad_norm': 0.0011295434134788618, 'learning_rate': 0.024462135431415732, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:13<10:09, 3.67s/it] 68%|██████▊ | 355/520 [22:17<10:04, 3.66s/it] {'loss': 1.1627, 'grad_norm': 0.0012355262608538954, 'learning_rate': 0.024194686309624666, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:17<10:04, 3.66s/it] 68%|██████▊ | 356/520 [22:20<10:01, 3.67s/it] {'loss': 1.1636, 'grad_norm': 0.001266932802824988, 'learning_rate': 0.0239282398310251, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:20<10:01, 3.67s/it] 69%|██████▊ | 357/520 [22:24<09:57, 3.67s/it] {'loss': 1.2013, 'grad_norm': 0.0011728026407174872, 'learning_rate': 0.023662806348164964, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:24<09:57, 3.67s/it] 69%|██████▉ | 358/520 [22:28<09:55, 3.68s/it] {'loss': 1.1237, 'grad_norm': 0.0012691342731595886, 'learning_rate': 0.02339839617423318, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:28<09:55, 3.68s/it] 69%|██████▉ | 359/520 [22:31<09:52, 3.68s/it] {'loss': 1.1726, 'grad_norm': 0.0012348622947526966, 'learning_rate': 0.023135019582658803, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:31<09:52, 3.68s/it] 69%|██████▉ | 360/520 [22:35<09:47, 3.67s/it] {'loss': 1.1771, 'grad_norm': 0.0011940918456471129, 'learning_rate': 0.022872686806712034, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:35<09:47, 3.67s/it] 69%|██████▉ | 361/520 
[22:39<09:45, 3.68s/it] {'loss': 1.1947, 'grad_norm': 0.0010789816009575254, 'learning_rate': 0.02261140803910644, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:39<09:45, 3.68s/it] 70%|██████▉ | 362/520 [22:42<09:43, 3.69s/it] {'loss': 1.1731, 'grad_norm': 0.0013416886778439305, 'learning_rate': 0.02235119343160303, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:42<09:43, 3.69s/it] 70%|██████▉ | 363/520 [22:46<09:36, 3.67s/it] {'loss': 1.205, 'grad_norm': 0.0012275162281391455, 'learning_rate': 0.022092053094615812, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:46<09:36, 3.67s/it] 70%|███████ | 364/520 [22:50<09:35, 3.69s/it] {'loss': 1.2096, 'grad_norm': 0.0012360656589857483, 'learning_rate': 0.021833997096818897, 'epoch': 0.7} + 70%|███████ | 364/520 [22:50<09:35, 3.69s/it] 70%|███████ | 365/520 [22:53<09:30, 3.68s/it] {'loss': 1.2559, 'grad_norm': 0.0012713795106029783, 'learning_rate': 0.021577035464755392, 'epoch': 0.7} + 70%|███████ | 365/520 [22:53<09:30, 3.68s/it] 70%|███████ | 366/520 [22:57<09:23, 3.66s/it] {'loss': 1.2229, 'grad_norm': 0.0011938188153720482, 'learning_rate': 0.02132117818244771, 'epoch': 0.7} + 70%|███████ | 366/520 [22:57<09:23, 3.66s/it] 71%|███████ | 367/520 [23:01<09:20, 3.67s/it] {'loss': 1.2214, 'grad_norm': 0.0012802697760596394, 'learning_rate': 0.021066435191009716, 'epoch': 0.71} + 71%|███████ | 367/520 [23:01<09:20, 3.67s/it] 71%|███████ | 368/520 [23:04<09:20, 3.69s/it] {'loss': 1.0761, 'grad_norm': 0.0012445454966495235, 'learning_rate': 0.02081281638826052, 'epoch': 0.71} + 71%|███████ | 368/520 [23:04<09:20, 3.69s/it] 71%|███████ | 369/520 [23:08<09:16, 3.69s/it] {'loss': 1.1733, 'grad_norm': 0.0010945349877722309, 'learning_rate': 0.02056033162833977, 'epoch': 0.71} + 71%|███████ | 369/520 [23:08<09:16, 3.69s/it] 71%|███████ | 370/520 [23:12<09:14, 3.69s/it] {'loss': 1.1352, 'grad_norm': 0.0011708638304799737, 'learning_rate': 0.02030899072132493, 'epoch': 0.71} + 71%|███████ | 370/520 [23:12<09:14, 3.69s/it] 71%|███████▏ | 371/520 [23:15<09:08, 3.68s/it] {'loss': 1.1224, 'grad_norm': 0.0012979351153024163, 'learning_rate': 0.02005880343284999, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:15<09:08, 3.68s/it] 72%|███████▏ | 372/520 [23:19<09:05, 3.68s/it] {'loss': 1.2408, 'grad_norm': 0.0010862901740741483, 'learning_rate': 0.01980977948372612, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:19<09:05, 3.68s/it] 72%|███████▏ | 373/520 [23:23<09:03, 3.70s/it] {'loss': 1.1293, 'grad_norm': 0.00129337296038331, 'learning_rate': 0.01956192854956397, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:23<09:03, 3.70s/it] 72%|███████▏ | 374/520 [23:26<08:57, 3.68s/it] {'loss': 1.221, 'grad_norm': 0.001282982493968197, 'learning_rate': 0.019315260260397638, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:26<08:57, 3.68s/it] 72%|███████▏ | 375/520 [23:30<08:53, 3.68s/it] {'loss': 1.1369, 'grad_norm': 0.0012661433040554641, 'learning_rate': 0.019069784200310594, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:30<08:53, 3.68s/it] 72%|███████▏ | 376/520 [23:34<08:53, 3.71s/it] {'loss': 1.2399, 'grad_norm': 0.00118336803820421, 'learning_rate': 0.018825509907063328, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:34<08:53, 3.71s/it] 72%|███████▎ | 377/520 [23:38<08:47, 3.69s/it] {'loss': 1.1709, 'grad_norm': 0.001346738977726453, 'learning_rate': 0.018582446871722638, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:38<08:47, 3.69s/it] 73%|███████▎ | 378/520 [23:41<08:43, 3.69s/it] {'loss': 1.2361, 'grad_norm': 0.001178803036731523, 'learning_rate': 0.018340604538293014, 'epoch': 0.73} + 73%|███████▎ 
| 378/520 [23:41<08:43, 3.69s/it] 73%|███████▎ | 379/520 [23:45<08:37, 3.67s/it] {'loss': 1.2021, 'grad_norm': 0.00115123955016492, 'learning_rate': 0.018099992303349577, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:45<08:37, 3.67s/it] 73%|███████▎ | 380/520 [23:49<08:33, 3.66s/it] {'loss': 1.2185, 'grad_norm': 0.0012428431239581259, 'learning_rate': 0.017860619515673033, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:49<08:33, 3.66s/it] 73%|███████▎ | 381/520 [23:52<08:28, 3.66s/it] {'loss': 1.2126, 'grad_norm': 0.0011628453512003481, 'learning_rate': 0.017622495475886485, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:52<08:28, 3.66s/it] 73%|███████▎ | 382/520 [23:56<08:24, 3.65s/it] {'loss': 1.1868, 'grad_norm': 0.0011151213183565147, 'learning_rate': 0.01738562943609396, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:56<08:24, 3.65s/it] 74%|███████▎ | 383/520 [23:59<08:20, 3.66s/it] {'loss': 1.0578, 'grad_norm': 0.0013367610705491513, 'learning_rate': 0.01715003059952098, 'epoch': 0.74} + 74%|███████▎ | 383/520 [23:59<08:20, 3.66s/it] 74%|███████▍ | 384/520 [24:03<08:17, 3.66s/it] {'loss': 1.2127, 'grad_norm': 0.0010557877788629363, 'learning_rate': 0.016915708120157042, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:03<08:17, 3.66s/it] 74%|███████▍ | 385/520 [24:07<08:12, 3.65s/it] {'loss': 1.1983, 'grad_norm': 0.0011792010772236439, 'learning_rate': 0.016682671102399804, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:07<08:12, 3.65s/it] 74%|███████▍ | 386/520 [24:10<08:12, 3.67s/it] {'loss': 1.1484, 'grad_norm': 0.0010482366006137804, 'learning_rate': 0.016450928600701503, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:10<08:12, 3.67s/it] 74%|███████▍ | 387/520 [24:14<08:13, 3.71s/it] {'loss': 1.2384, 'grad_norm': 0.0011821736951338848, 'learning_rate': 0.016220489619216988, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:14<08:13, 3.71s/it] 75%|███████▍ | 388/520 [24:18<08:13, 3.74s/it] {'loss': 1.11, 'grad_norm': 0.0011695254273194821, 'learning_rate': 0.01599136311145402, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:18<08:13, 3.74s/it] 75%|███████▍ | 389/520 [24:22<08:06, 3.71s/it] {'loss': 1.155, 'grad_norm': 0.00147776992069973, 'learning_rate': 0.015763557979925326, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:22<08:06, 3.71s/it] 75%|███████▌ | 390/520 [24:26<08:05, 3.74s/it] {'loss': 1.2243, 'grad_norm': 0.0011777190484338659, 'learning_rate': 0.015537083075802649, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:26<08:05, 3.74s/it] 75%|███████▌ | 391/520 [24:29<08:06, 3.77s/it] {'loss': 1.2827, 'grad_norm': 0.0012371792832115507, 'learning_rate': 0.015311947198572918, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:29<08:06, 3.77s/it] 75%|███████▌ | 392/520 [24:33<08:05, 3.80s/it] {'loss': 1.1113, 'grad_norm': 0.0012054388429768414, 'learning_rate': 0.015088159095696364, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:33<08:05, 3.80s/it] 76%|███████▌ | 393/520 [24:37<08:04, 3.82s/it] {'loss': 1.1003, 'grad_norm': 0.0010472550671595574, 'learning_rate': 0.014865727462266543, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:37<08:04, 3.82s/it] 76%|███████▌ | 394/520 [24:41<08:03, 3.84s/it] {'loss': 1.1792, 'grad_norm': 0.0012855979793600192, 'learning_rate': 0.014644660940672627, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:41<08:03, 3.84s/it] 76%|███████▌ | 395/520 [24:45<08:00, 3.84s/it] {'loss': 1.1465, 'grad_norm': 0.0013235316884907618, 'learning_rate': 0.014424968120263504, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:45<08:00, 3.84s/it] 76%|███████▌ | 396/520 [24:49<07:55, 3.84s/it] {'loss': 1.2217, 'grad_norm': 
0.0013100254525505702, 'learning_rate': 0.014206657537014078, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:49<07:55, 3.84s/it] 76%|███████▋ | 397/520 [24:53<07:57, 3.88s/it] {'loss': 1.196, 'grad_norm': 0.001211833091260805, 'learning_rate': 0.013989737673193682, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:53<07:57, 3.88s/it] 77%|███████▋ | 398/520 [24:56<07:51, 3.86s/it] {'loss': 1.1921, 'grad_norm': 0.001297561801708084, 'learning_rate': 0.013774216957036367, 'epoch': 0.77} + 77%|███████▋ | 398/520 [24:56<07:51, 3.86s/it] 77%|███████▋ | 399/520 [25:01<07:56, 3.94s/it] {'loss': 1.1304, 'grad_norm': 0.0011655241256783855, 'learning_rate': 0.013560103762413584, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:01<07:56, 3.94s/it] 77%|███████▋ | 400/520 [25:05<08:01, 4.01s/it] {'loss': 1.1636, 'grad_norm': 0.001118811914445664, 'learning_rate': 0.013347406408508695, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:05<08:01, 4.01s/it] 77%|███████▋ | 401/520 [25:09<08:01, 4.04s/it] {'loss': 1.0377, 'grad_norm': 0.0013069701508525468, 'learning_rate': 0.013136133159493801, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:09<08:01, 4.04s/it] 77%|███████▋ | 402/520 [25:13<07:57, 4.04s/it] {'loss': 1.163, 'grad_norm': 0.001266629383995101, 'learning_rate': 0.012926292224208664, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:13<07:57, 4.04s/it] 78%|███████▊ | 403/520 [25:17<07:49, 4.01s/it] {'loss': 1.1836, 'grad_norm': 0.0013470886734477988, 'learning_rate': 0.012717891755841721, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:17<07:49, 4.01s/it] 78%|███████▊ | 404/520 [25:21<07:43, 3.99s/it] {'loss': 1.0982, 'grad_norm': 0.001425290178283298, 'learning_rate': 0.012510939851613286, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:21<07:43, 3.99s/it] 78%|███████▊ | 405/520 [25:25<07:40, 4.00s/it] {'loss': 1.1461, 'grad_norm': 0.0011656031239480394, 'learning_rate': 0.01230544455246101, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:25<07:40, 4.00s/it] 78%|███████▊ | 406/520 [25:29<07:42, 4.06s/it] {'loss': 1.0738, 'grad_norm': 0.0014296135342159496, 'learning_rate': 0.012101413842727345, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:29<07:42, 4.06s/it] 78%|███████▊ | 407/520 [25:33<07:43, 4.10s/it] {'loss': 1.2647, 'grad_norm': 0.0012531811270457935, 'learning_rate': 0.01189885564984946, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:33<07:43, 4.10s/it] 78%|███████▊ | 408/520 [25:37<07:42, 4.13s/it] {'loss': 1.179, 'grad_norm': 0.0013505840088987163, 'learning_rate': 0.011697777844051106, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:37<07:42, 4.13s/it] 79%|███████▊ | 409/520 [25:42<07:41, 4.16s/it] {'loss': 1.294, 'grad_norm': 0.001312722441444962, 'learning_rate': 0.01149818823803686, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:42<07:41, 4.16s/it] 79%|███████▉ | 410/520 [25:46<07:38, 4.17s/it] {'loss': 1.0364, 'grad_norm': 0.0012474027885391087, 'learning_rate': 0.011300094586688632, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:46<07:38, 4.17s/it] 79%|███████▉ | 411/520 [25:50<07:35, 4.18s/it] {'loss': 1.2753, 'grad_norm': 0.0013264831279018385, 'learning_rate': 0.011103504586764262, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:50<07:35, 4.18s/it] 79%|███████▉ | 412/520 [25:54<07:32, 4.19s/it] {'loss': 1.182, 'grad_norm': 0.001221406723405306, 'learning_rate': 0.01090842587659851, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:54<07:32, 4.19s/it] 79%|███████▉ | 413/520 [25:58<07:29, 4.20s/it] {'loss': 1.1617, 'grad_norm': 0.0011604124141167517, 'learning_rate': 0.010714866035806327, 'epoch': 0.79} + 79%|███████▉ | 413/520 [25:58<07:29, 
4.20s/it] 80%|███████▉ | 414/520 [26:03<07:26, 4.21s/it] {'loss': 0.9733, 'grad_norm': 0.0009979658139484468, 'learning_rate': 0.010522832584988234, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:03<07:26, 4.21s/it] 80%|███████▉ | 415/520 [26:07<07:22, 4.21s/it] {'loss': 1.1657, 'grad_norm': 0.001165400973613449, 'learning_rate': 0.010332332985438248, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:07<07:22, 4.21s/it] 80%|████████ | 416/520 [26:11<07:11, 4.15s/it] {'loss': 1.0711, 'grad_norm': 0.0013323004209116062, 'learning_rate': 0.010143374638853892, 'epoch': 0.8} + 80%|████████ | 416/520 [26:11<07:11, 4.15s/it] 80%|████████ | 417/520 [26:15<06:54, 4.02s/it] {'loss': 1.2325, 'grad_norm': 0.0012251851667383503, 'learning_rate': 0.009955964887048608, 'epoch': 0.8} + 80%|████████ | 417/520 [26:15<06:54, 4.02s/it] 80%|████████ | 418/520 [26:18<06:41, 3.94s/it] {'loss': 1.2272, 'grad_norm': 0.0011455257799510353, 'learning_rate': 0.009770111011666582, 'epoch': 0.8} + 80%|████████ | 418/520 [26:18<06:41, 3.94s/it] 81%|████████ | 419/520 [26:22<06:30, 3.86s/it] {'loss': 1.219, 'grad_norm': 0.0013572533929689006, 'learning_rate': 0.00958582023389974, 'epoch': 0.81} + 81%|████████ | 419/520 [26:22<06:30, 3.86s/it] 81%|████████ | 420/520 [26:26<06:20, 3.80s/it] {'loss': 1.1109, 'grad_norm': 0.0012877094207506894, 'learning_rate': 0.009403099714207176, 'epoch': 0.81} + 81%|████████ | 420/520 [26:26<06:20, 3.80s/it] 81%|████████ | 421/520 [26:29<06:13, 3.77s/it] {'loss': 1.0488, 'grad_norm': 0.0012747078871708636, 'learning_rate': 0.009221956552036992, 'epoch': 0.81} + 81%|████████ | 421/520 [26:29<06:13, 3.77s/it] 81%|████████ | 422/520 [26:33<06:07, 3.75s/it] {'loss': 1.1713, 'grad_norm': 0.0013037365912949468, 'learning_rate': 0.009042397785550405, 'epoch': 0.81} + 81%|████████ | 422/520 [26:33<06:07, 3.75s/it] 81%|████████▏ | 423/520 [26:37<06:01, 3.72s/it] {'loss': 1.1381, 'grad_norm': 0.0013311067518494734, 'learning_rate': 0.008864430391348333, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:37<06:01, 3.72s/it] 82%|████████▏ | 424/520 [26:41<05:57, 3.72s/it] {'loss': 1.2463, 'grad_norm': 0.0011300100737232088, 'learning_rate': 0.008688061284200266, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:41<05:57, 3.72s/it] 82%|████████▏ | 425/520 [26:44<05:52, 3.71s/it] {'loss': 1.158, 'grad_norm': 0.0012399982877761694, 'learning_rate': 0.008513297316775626, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:44<05:52, 3.71s/it] 82%|████████▏ | 426/520 [26:48<05:46, 3.69s/it] {'loss': 1.1929, 'grad_norm': 0.001631507228461975, 'learning_rate': 0.00834014527937756, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:48<05:46, 3.69s/it] 82%|████████▏ | 427/520 [26:52<05:41, 3.67s/it] {'loss': 1.0896, 'grad_norm': 0.0011859019111362561, 'learning_rate': 0.008168611899679013, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:52<05:41, 3.67s/it] 82%|████████▏ | 428/520 [26:55<05:38, 3.68s/it] {'loss': 1.084, 'grad_norm': 0.0013460755748622786, 'learning_rate': 0.00799870384246143, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:55<05:38, 3.68s/it] 82%|████████▎ | 429/520 [26:59<05:35, 3.68s/it] {'loss': 1.18, 'grad_norm': 0.0012394512972203737, 'learning_rate': 0.007830427709355726, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:59<05:35, 3.68s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:03<05:34, 3.72s/it] {'loss': 1.1784, 'grad_norm': 0.0011736140107788682, 'learning_rate': 0.0076637900385857945, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:03<05:34, 3.72s/it] 83%|████████▎ | 431/520 [27:06<05:30, 3.71s/it] {'loss': 1.135, 'grad_norm': 0.0011911387765708528, 'learning_rate': 0.007498797304714544, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:06<05:30, 3.71s/it] 83%|████████▎ | 432/520 [27:10<05:27, 3.72s/it] {'loss': 1.0855, 'grad_norm': 0.0012908183530237983, 'learning_rate': 0.00733545591839222, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:10<05:27, 3.72s/it] 83%|████████▎ | 433/520 [27:14<05:23, 3.72s/it] {'loss': 1.2204, 'grad_norm': 0.001214873737795761, 'learning_rate': 0.007173772226107433, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:14<05:23, 3.72s/it] 83%|████████▎ | 434/520 [27:18<05:19, 3.72s/it] {'loss': 0.9733, 'grad_norm': 0.0012881912140378345, 'learning_rate': 0.0070137525099404855, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:18<05:19, 3.72s/it] 84%|████████▎ | 435/520 [27:21<05:19, 3.75s/it] {'loss': 1.2501, 'grad_norm': 0.0013366523447292703, 'learning_rate': 0.006855402987319348, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:21<05:19, 3.75s/it] 84%|████████▍ | 436/520 [27:25<05:17, 3.78s/it] {'loss': 1.063, 'grad_norm': 0.0013064544040086133, 'learning_rate': 0.006698729810778065, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:25<05:17, 3.78s/it] 84%|████████▍ | 437/520 [27:29<05:14, 3.79s/it] {'loss': 1.2742, 'grad_norm': 0.0012741090708070766, 'learning_rate': 0.00654373906771768, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:29<05:14, 3.79s/it] 84%|████████▍ | 438/520 [27:33<05:12, 3.81s/it] {'loss': 1.0924, 'grad_norm': 0.0012262502962664243, 'learning_rate': 0.006390436780169734, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:33<05:12, 3.81s/it] 84%|████████▍ | 439/520 [27:37<05:08, 3.81s/it] {'loss': 1.1186, 'grad_norm': 0.0009900701161696742, 'learning_rate': 0.006238828904562316, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:37<05:08, 3.81s/it] 85%|████████▍ | 440/520 [27:41<05:04, 3.80s/it] {'loss': 1.1298, 'grad_norm': 0.0013031698133062202, 'learning_rate': 0.006088921331488567, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:41<05:04, 3.80s/it] 85%|████████▍ | 441/520 [27:44<04:59, 3.79s/it] {'loss': 1.1319, 'grad_norm': 0.0012014310303906563, 'learning_rate': 0.00594071988547788, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:44<04:59, 3.79s/it] 85%|████████▌ | 442/520 [27:48<04:53, 3.76s/it] {'loss': 1.1914, 'grad_norm': 0.001371714155233398, 'learning_rate': 0.005794230324769518, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:48<04:53, 3.76s/it] 85%|████████▌ | 443/520 [27:52<04:48, 3.74s/it] {'loss': 1.1992, 'grad_norm': 0.001203625546669196, 'learning_rate': 0.0056494583410889145, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:52<04:48, 3.74s/it] 85%|████████▌ | 444/520 [27:55<04:44, 3.74s/it] {'loss': 1.1695, 'grad_norm': 0.0011078911038243772, 'learning_rate': 0.005506409559426573, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:55<04:44, 3.74s/it] 86%|████████▌ | 445/520 [27:59<04:40, 3.74s/it] {'loss': 1.0971, 'grad_norm': 0.0011795841719250792, 'learning_rate': 0.005365089537819435, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:59<04:40, 3.74s/it] 86%|████████▌ | 446/520 [28:03<04:36, 3.73s/it] {'loss': 1.2078, 'grad_norm': 0.0010899816752699839, 'learning_rate': 0.005225503767134954, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:03<04:36, 3.73s/it] 
86%|████████▌ | 447/520 [28:07<04:32, 3.73s/it] {'loss': 1.1633, 'grad_norm': 0.0012158931046400727, 'learning_rate': 0.005087657670857799, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:07<04:32, 3.73s/it] 86%|████████▌ | 448/520 [28:10<04:31, 3.77s/it] {'loss': 1.1648, 'grad_norm': 0.0013369842748983426, 'learning_rate': 0.004951556604879049, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:10<04:31, 3.77s/it] 86%|████████▋ | 449/520 [28:14<04:29, 3.79s/it] {'loss': 1.1686, 'grad_norm': 0.0011948007191911237, 'learning_rate': 0.004817205857288176, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:14<04:29, 3.79s/it] 87%|████████▋ | 450/520 [28:18<04:25, 3.79s/it] {'loss': 1.191, 'grad_norm': 0.0012687851410414482, 'learning_rate': 0.004684610648167504, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:18<04:25, 3.79s/it] 87%|████████▋ | 451/520 [28:22<04:21, 3.79s/it] {'loss': 1.1968, 'grad_norm': 0.0012944192735432336, 'learning_rate': 0.004553776129389453, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:22<04:21, 3.79s/it] 87%|████████▋ | 452/520 [28:26<04:16, 3.78s/it] {'loss': 1.2183, 'grad_norm': 0.001149884038961839, 'learning_rate': 0.004424707384416343, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:26<04:16, 3.78s/it] 87%|████████▋ | 453/520 [28:29<04:11, 3.76s/it] {'loss': 1.1905, 'grad_norm': 0.0011442362381926642, 'learning_rate': 0.0042974094281028495, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:29<04:11, 3.76s/it] 87%|████████▋ | 454/520 [28:33<04:09, 3.78s/it] {'loss': 1.1029, 'grad_norm': 0.0012347601339571456, 'learning_rate': 0.00417188720650119, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:33<04:09, 3.78s/it] 88%|████████▊ | 455/520 [28:37<04:08, 3.82s/it] {'loss': 1.2417, 'grad_norm': 0.0012472782274636964, 'learning_rate': 0.004048145596668967, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:37<04:08, 3.82s/it] 88%|████████▊ | 456/520 [28:41<04:07, 3.87s/it] {'loss': 1.1763, 'grad_norm': 0.0012583378594247607, 'learning_rate': 0.003926189406479613, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:41<04:07, 3.87s/it] 88%|████████▊ | 457/520 [28:45<04:04, 3.89s/it] {'loss': 1.0758, 'grad_norm': 0.0010519868356168543, 'learning_rate': 0.0038060233744356634, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:45<04:04, 3.89s/it] 88%|████████▊ | 458/520 [28:49<04:01, 3.90s/it] {'loss': 1.2935, 'grad_norm': 0.0013424923062207948, 'learning_rate': 0.003687652169484568, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:49<04:01, 3.90s/it] 88%|████████▊ | 459/520 [28:53<03:58, 3.91s/it] {'loss': 1.2232, 'grad_norm': 0.0012190590904387405, 'learning_rate': 0.0035710803908373225, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:53<03:58, 3.91s/it] 88%|████████▊ | 460/520 [28:57<03:50, 3.85s/it] {'loss': 1.113, 'grad_norm': 0.0012043904985619366, 'learning_rate': 0.0034563125677897935, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:57<03:50, 3.85s/it] 89%|████████▊ | 461/520 [29:00<03:45, 3.82s/it] {'loss': 1.1611, 'grad_norm': 0.0008985588729597775, 'learning_rate': 0.0033433531595466748, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:00<03:45, 3.82s/it] 89%|████████▉ | 462/520 [29:04<03:39, 3.79s/it] {'loss': 1.2557, 'grad_norm': 0.0011684540729722968, 'learning_rate': 0.0032322065550483003, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:04<03:39, 3.79s/it] 89%|████████▉ | 463/520 [29:08<03:33, 3.75s/it] {'loss': 1.0895, 'grad_norm': 0.0012884840033632534, 'learning_rate': 0.0031228770728000455, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:08<03:33, 3.75s/it] 89%|████████▉ | 464/520 [29:11<03:30, 3.76s/it] {'loss': 1.2072, 
'grad_norm': 0.001254350856591662, 'learning_rate': 0.0030153689607045845, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:11<03:30, 3.76s/it] 89%|████████▉ | 465/520 [29:15<03:25, 3.74s/it] {'loss': 1.3092, 'grad_norm': 0.0012868962273862167, 'learning_rate': 0.002909686395896827, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:15<03:25, 3.74s/it] 90%|████████▉ | 466/520 [29:19<03:21, 3.73s/it] {'loss': 1.2075, 'grad_norm': 0.001131484818673557, 'learning_rate': 0.0028058334845816216, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:19<03:21, 3.73s/it] 90%|████████▉ | 467/520 [29:23<03:17, 3.74s/it] {'loss': 1.1465, 'grad_norm': 0.0011352596626962152, 'learning_rate': 0.002703814261874199, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:23<03:17, 3.74s/it] 90%|█████████ | 468/520 [29:26<03:13, 3.72s/it] {'loss': 1.1719, 'grad_norm': 0.0014114828852423353, 'learning_rate': 0.002603632691643415, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:26<03:13, 3.72s/it] 90%|█████████ | 469/520 [29:30<03:08, 3.70s/it] {'loss': 1.2412, 'grad_norm': 0.0013376674951250955, 'learning_rate': 0.0025052926663577005, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:30<03:08, 3.70s/it] 90%|█████████ | 470/520 [29:34<03:04, 3.70s/it] {'loss': 1.1134, 'grad_norm': 0.001151689720260564, 'learning_rate': 0.0024087980069338825, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:34<03:04, 3.70s/it] 91%|█████████ | 471/520 [29:37<02:59, 3.67s/it] {'loss': 1.1418, 'grad_norm': 0.0013407770813258174, 'learning_rate': 0.002314152462588659, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:37<02:59, 3.67s/it] 91%|█████████ | 472/520 [29:41<02:56, 3.68s/it] {'loss': 1.1097, 'grad_norm': 0.0011757477701347292, 'learning_rate': 0.0022213597106929607, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:41<02:56, 3.68s/it] 91%|█████████ | 473/520 [29:45<02:53, 3.68s/it] {'loss': 1.1828, 'grad_norm': 0.0012808072458893585, 'learning_rate': 0.0021304233566290967, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:45<02:53, 3.68s/it] 91%|█████████ | 474/520 [29:48<02:49, 3.68s/it] {'loss': 1.1794, 'grad_norm': 0.0011418606673968853, 'learning_rate': 0.002041346933650612, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:48<02:49, 3.68s/it] 91%|█████████▏| 475/520 [29:52<02:46, 3.70s/it] {'loss': 1.0956, 'grad_norm': 0.0011361182301664764, 'learning_rate': 0.0019541339027450257, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:52<02:46, 3.70s/it] 92%|█████████▏| 476/520 [29:56<02:43, 3.71s/it] {'loss': 1.1642, 'grad_norm': 0.0012987620803346959, 'learning_rate': 0.0018687876524993985, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:56<02:43, 3.71s/it] 92%|█████████▏| 477/520 [29:59<02:39, 3.71s/it] {'loss': 1.1626, 'grad_norm': 0.001379897488299531, 'learning_rate': 0.001785311498968617, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:59<02:39, 3.71s/it] 92%|█████████▏| 478/520 [30:03<02:36, 3.73s/it] {'loss': 1.103, 'grad_norm': 0.0012103398286070499, 'learning_rate': 0.00170370868554659, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:03<02:36, 3.73s/it] 92%|█████████▏| 479/520 [30:07<02:31, 3.70s/it] {'loss': 1.1536, 'grad_norm': 0.0012870507549373144, 'learning_rate': 0.0016239823828401946, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:07<02:31, 3.70s/it] 92%|█████████▏| 480/520 [30:11<02:27, 3.69s/it] {'loss': 1.1673, 'grad_norm': 0.0011406469461050961, 'learning_rate': 0.0015461356885461076, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:11<02:27, 3.69s/it] 92%|█████████▎| 481/520 [30:14<02:24, 3.70s/it] {'loss': 1.1568, 'grad_norm': 0.001078280790819342, 'learning_rate': 
0.0014701716273304523, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:14<02:24, 3.70s/it] 93%|█████████▎| 482/520 [30:18<02:19, 3.68s/it] {'loss': 1.1783, 'grad_norm': 0.0011297294503726804, 'learning_rate': 0.0013960931507112752, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:18<02:19, 3.68s/it] 93%|█████████▎| 483/520 [30:22<02:16, 3.70s/it] {'loss': 1.172, 'grad_norm': 0.0012262902650054522, 'learning_rate': 0.0013239031369438325, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:22<02:16, 3.70s/it] 93%|█████████▎| 484/520 [30:25<02:13, 3.71s/it] {'loss': 1.1826, 'grad_norm': 0.001272715122591359, 'learning_rate': 0.0012536043909088192, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:25<02:13, 3.71s/it] 93%|█████████▎| 485/520 [30:29<02:09, 3.71s/it] {'loss': 1.133, 'grad_norm': 0.0012047841022917625, 'learning_rate': 0.001185199644003332, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:29<02:09, 3.71s/it] 93%|█████████▎| 486/520 [30:33<02:05, 3.70s/it] {'loss': 1.2522, 'grad_norm': 0.0012749670158121156, 'learning_rate': 0.0011186915540347731, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:33<02:05, 3.70s/it] 94%|█████████▎| 487/520 [30:37<02:02, 3.71s/it] {'loss': 1.1168, 'grad_norm': 0.0012019466319470476, 'learning_rate': 0.0010540827051175817, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:37<02:02, 3.71s/it] 94%|█████████▍| 488/520 [30:40<01:58, 3.71s/it] {'loss': 1.059, 'grad_norm': 0.0012697659447653074, 'learning_rate': 0.0009913756075728088, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:40<01:58, 3.71s/it] 94%|█████████▍| 489/520 [30:44<01:54, 3.70s/it] {'loss': 1.1796, 'grad_norm': 0.0010443209978152862, 'learning_rate': 0.0009305726978306172, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:44<01:54, 3.70s/it] 94%|█████████▍| 490/520 [30:48<01:50, 3.70s/it] {'loss': 1.178, 'grad_norm': 0.0012541432039401895, 'learning_rate': 0.0008716763383355864, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:48<01:50, 3.70s/it] 94%|█████████▍| 491/520 [30:51<01:47, 3.70s/it] {'loss': 1.1421, 'grad_norm': 0.001305451982915427, 'learning_rate': 0.0008146888174549339, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:51<01:47, 3.70s/it] 95%|█████████▍| 492/520 [30:55<01:43, 3.71s/it] {'loss': 1.2537, 'grad_norm': 0.0012606922771936874, 'learning_rate': 0.0007596123493895991, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:55<01:43, 3.71s/it] 95%|█████████▍| 493/520 [30:59<01:40, 3.71s/it] {'loss': 1.1735, 'grad_norm': 0.0012962347710469776, 'learning_rate': 0.0007064490740882057, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:59<01:40, 3.71s/it] 95%|█████████▌| 494/520 [31:02<01:36, 3.72s/it] {'loss': 1.1845, 'grad_norm': 0.0011366944304833165, 'learning_rate': 0.0006552010571639456, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:03<01:36, 3.72s/it] 95%|█████████▌| 495/520 [31:06<01:32, 3.71s/it] {'loss': 1.1681, 'grad_norm': 0.0012753453521620022, 'learning_rate': 0.0006058702898142643, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:06<01:32, 3.71s/it] 95%|█████████▌| 496/520 [31:10<01:29, 3.72s/it] {'loss': 1.0949, 'grad_norm': 0.0013106726828299167, 'learning_rate': 0.0005584586887435739, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:10<01:29, 3.72s/it] 96%|█████████▌| 497/520 [31:14<01:25, 3.71s/it] {'loss': 1.1086, 'grad_norm': 0.0010435197065032749, 'learning_rate': 0.0005129680960887006, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:14<01:25, 3.71s/it] 96%|█████████▌| 498/520 [31:17<01:21, 3.70s/it] {'loss': 1.1557, 'grad_norm': 0.0012431849034951203, 'learning_rate': 0.0004694002793473595, 'epoch': 0.96} + 96%|█████████▌| 
498/520 [31:17<01:21, 3.70s/it] 96%|█████████▌| 499/520 [31:21<01:18, 3.72s/it] {'loss': 1.2453, 'grad_norm': 0.0011404799748292067, 'learning_rate': 0.0004277569313094809, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:21<01:18, 3.72s/it] 96%|█████████▌| 500/520 [31:25<01:14, 3.72s/it] {'loss': 1.2768, 'grad_norm': 0.0014081399232670903, 'learning_rate': 0.00038803966999139685, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:25<01:14, 3.72s/it] 96%|█████████▋| 501/520 [31:28<01:10, 3.71s/it] {'loss': 1.1481, 'grad_norm': 0.0012576905085282994, 'learning_rate': 0.000350250038573019, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:28<01:10, 3.71s/it] 97%|█████████▋| 502/520 [31:32<01:06, 3.72s/it] {'loss': 1.1911, 'grad_norm': 0.001195035795934756, 'learning_rate': 0.00031438950533786984, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:32<01:06, 3.72s/it] 97%|█████████▋| 503/520 [31:36<01:02, 3.69s/it] {'loss': 1.1366, 'grad_norm': 0.0011851313490654932, 'learning_rate': 0.00028045946361601184, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:36<01:02, 3.69s/it] 97%|█████████▋| 504/520 [31:40<00:59, 3.71s/it] {'loss': 1.1818, 'grad_norm': 0.0014248269249030375, 'learning_rate': 0.0002484612317299295, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:40<00:59, 3.71s/it] 97%|█████████▋| 505/520 [31:43<00:55, 3.69s/it] {'loss': 1.2122, 'grad_norm': 0.0012473520486006031, 'learning_rate': 0.00021839605294330934, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:43<00:55, 3.69s/it] 97%|█████████▋| 506/520 [31:47<00:51, 3.71s/it] {'loss': 1.1469, 'grad_norm': 0.0013262747989006346, 'learning_rate': 0.00019026509541272276, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:47<00:51, 3.71s/it] 98%|█████████▊| 507/520 [31:51<00:48, 3.71s/it] {'loss': 1.2798, 'grad_norm': 0.0010948192407902644, 'learning_rate': 0.0001640694521422459, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:51<00:48, 3.71s/it] 98%|█████████▊| 508/520 [31:54<00:44, 3.71s/it] {'loss': 1.2632, 'grad_norm': 0.0012762653561905442, 'learning_rate': 0.00013981014094099353, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:54<00:44, 3.71s/it] 98%|█████████▊| 509/520 [31:58<00:40, 3.69s/it] {'loss': 1.2341, 'grad_norm': 0.0011911051162159423, 'learning_rate': 0.00011748810438355628, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:58<00:40, 3.69s/it] 98%|█████████▊| 510/520 [32:02<00:37, 3.71s/it] {'loss': 1.1849, 'grad_norm': 0.0012454845985953597, 'learning_rate': 9.710420977340761e-05, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:02<00:37, 3.71s/it] 98%|█████████▊| 511/520 [32:05<00:33, 3.70s/it] {'loss': 1.1448, 'grad_norm': 0.0012201887359970679, 'learning_rate': 7.865924910916978e-05, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:05<00:33, 3.70s/it] 98%|█████████▊| 512/520 [32:09<00:29, 3.72s/it] {'loss': 1.0399, 'grad_norm': 0.0012037612556525603, 'learning_rate': 6.215393905388278e-05, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:09<00:29, 3.72s/it] 99%|█████████▊| 513/520 [32:13<00:25, 3.71s/it] {'loss': 1.2344, 'grad_norm': 0.0013754861597051462, 'learning_rate': 4.758892090711009e-05, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:13<00:25, 3.71s/it] 99%|█████████▉| 514/520 [32:17<00:22, 3.71s/it] {'loss': 1.2033, 'grad_norm': 0.0011410645497357435, 'learning_rate': 3.496476058006959e-05, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:17<00:22, 3.71s/it] 99%|█████████▉| 515/520 [32:20<00:18, 3.69s/it] {'loss': 1.255, 'grad_norm': 0.0014618790371234704, 'learning_rate': 2.4281948573617874e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:20<00:18, 3.69s/it] 99%|█████████▉| 
516/520 [32:24<00:14, 3.72s/it] {'loss': 1.1708, 'grad_norm': 0.0012358320652189954, 'learning_rate': 1.5540899959187725e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:24<00:14, 3.72s/it] 99%|█████████▉| 517/520 [32:28<00:11, 3.69s/it] {'loss': 1.1788, 'grad_norm': 0.0011562702552583523, 'learning_rate': 8.741954362678772e-06, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:28<00:11, 3.69s/it] 100%|█████████▉| 518/520 [32:31<00:07, 3.68s/it] {'loss': 1.1735, 'grad_norm': 0.0012948113130142344, 'learning_rate': 3.885375951256931e-06, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:31<00:07, 3.68s/it] 100%|█████████▉| 519/520 [32:35<00:03, 3.68s/it] {'loss': 1.1538, 'grad_norm': 0.0011960113566554817, 'learning_rate': 9.713534230904043e-07, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:35<00:03, 3.68s/it] 100%|██████████| 520/520 [32:40<00:00, 3.94s/it] {'loss': 1.142, 'grad_norm': 0.0010591999946751285, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:40<00:00, 3.94s/it] {'train_runtime': 1960.0807, 'train_samples_per_second': 33.942, 'train_steps_per_second': 0.265, 'train_loss': 1.2259624202664081, 'epoch': 1.0} + 100%|██████████| 520/520 [32:40<00:00, 3.94s/it] 100%|██████████| 520/520 [32:40<00:00, 3.77s/it] +[2025-10-09 08:11:50,107] [INFO] [launch.py:348:main] Process 887207 exits successfully. +[2025-10-09 08:11:50,108] [INFO] [launch.py:348:main] Process 887209 exits successfully. +[2025-10-09 08:11:50,108] [INFO] [launch.py:348:main] Process 887203 exits successfully. +[2025-10-09 08:11:51,110] [INFO] [launch.py:348:main] Process 887208 exits successfully. +[2025-10-09 08:11:51,110] [INFO] [launch.py:348:main] Process 887205 exits successfully. +[2025-10-09 08:11:51,110] [INFO] [launch.py:348:main] Process 887206 exits successfully. +[2025-10-09 08:11:51,111] [INFO] [launch.py:348:main] Process 887204 exits successfully. +[2025-10-09 08:11:55,115] [INFO] [launch.py:348:main] Process 887202 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-1_connector-3.0_0.5_1e-1_ablation_20251009_073254.log +Timestamp: 2025-10-09 08:11:57 +===================================== diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation_20251009_105701.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation_20251009_105701.log new file mode 100644 index 0000000000000000000000000000000000000000..8396be2b2c9d081c7ec6bfbf9d58e24779b4385a --- /dev/null +++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation_20251009_105701.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation_20251009_105701.log +Timestamp: 2025-10-09 10:57:01 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
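The learning_rate column above traces the requested cosine schedule (`--lr_scheduler_type cosine --warmup_ratio 0.03` in these launch commands) down to exactly 0.0 at step 520/520. A minimal sketch that reproduces the logged values, assuming the Hugging Face Trainer's standard linear-warmup-plus-cosine-decay rule with 16 warmup steps (ceil of 0.03 × 520) and the 1e-1 peak LR encoded in the completed run's name:

```python
import math

def cosine_lr(step, total_steps=520, warmup_steps=16, peak_lr=1e-1):
    """Linear warmup, then cosine decay to zero (HF get_cosine_schedule_with_warmup)."""
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(414))  # ~0.0105233, matching the logged 0.010522... at step 414
print(cosine_lr(519))  # ~9.71e-07, matching the logged 9.713534...e-07
print(cosine_lr(520))  # 0.0, matching the final logged 'learning_rate': 0.0
```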
+ import pynvml # type: ignore[import] +[2025-10-09 10:57:03,907] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:57:06,786] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 10:57:06,787] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 1e-2 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 1e-2 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
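The `--world_info` blob in the `runner.py` command above is nothing more than base64-encoded JSON mapping each host to its GPU ranks; decoding it with the standard library reproduces the WORLD INFO DICT that launch.py prints a few lines below:

```python
import base64, json

world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} -- eight local ranks, one per GPU
```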
+ import pynvml # type: ignore[import] +[2025-10-09 10:57:09,413] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:57:10,456] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 10:57:10,456] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 10:57:10,456] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 10:57:10,456] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 10:57:10,456] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 10:57:10,456] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 10:57:10,456] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 10:57:10,458] [INFO] [launch.py:253:main] process 1178054 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '1e-2', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '1e-2', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 10:57:10,461] [INFO] [launch.py:253:main] process 1178055 spawned with 
command: [... identical to the rank-0 command above, with '--local_rank=1' ...]
+[2025-10-09 10:57:10,463 - 10:57:10,473] [INFO] [launch.py:253:main] processes 1178056 through 1178061 spawned with the identical command for '--local_rank=2' through '--local_rank=7'
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[... the same FutureWarning repeats once per spawned rank ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-09 10:57:17,006] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[... the same real_accelerator line repeats once per rank, 10:57:17,319 through 10:57:17,379 ...]
+[2025-10-09 10:57:17,426] [INFO] [comm.py:637:init_distributed] cdb=None
+[... the same comm.py line repeats once per rank, 10:57:17,745 through 10:57:17,795 ...]
+[2025-10-09 10:57:17,796] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+[... 'Apply masks for the following modules' and the huggingface_hub `resume_download` FutureWarning repeat once per rank ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+[... the 'Special tokens have been added', TypedStorage UserWarning, and Flash Attention 2.0 notices repeat for the other ranks ...]
+ywang29-vrdb-test1-worker-0:1178054:1178054 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1178054:1178054 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1178054:1178054 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1178054:1178054 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1178054:1178054 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1178054:1178054 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+[... the same per-rank NCCL bootstrap lines repeat for rank 3 ...]
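The mask configuration dumped above (`mask_type: 'soft'`, temperatures of 0.5 for attention, MLP, and connector, `--init_mean_* 3.0` in the launch command) points at temperature-controlled soft masks over the LLM and connector weights. The gating code itself lives in `tinyllava/train` and never appears in this log, so the following is only a plausible sketch of such a mechanism; the class and attribute names are invented for illustration:

```python
import torch
import torch.nn as nn

class SoftMaskedLinear(nn.Module):
    """Illustrative only: weights gated by a learnable soft mask,
    sigmoid(score / temperature). With the logged settings (init_mean=3.0,
    temperature=0.5) every gate starts at sigmoid(6.0) ~= 0.9975, i.e. the
    subnetwork begins almost fully open and training sharpens it."""

    def __init__(self, in_features, out_features, init_mean=3.0, temperature=0.5):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)
        self.scores = nn.Parameter(torch.full_like(self.linear.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        # Soft, differentiable gate in (0, 1); lower temperature -> sharper gate.
        mask = torch.sigmoid(self.scores / self.temperature)
        return nn.functional.linear(x, self.linear.weight * mask, self.linear.bias)
```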
+ywang29-vrdb-test1-worker-0:1178059:1178059 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1178059:1178059 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178059:1178059 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178055:1178055 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1178055:1178055 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178055:1178055 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178059:1178059 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1178059:1178059 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1178059:1178059 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1178055:1178055 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1178055:1178055 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1178055:1178055 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1178059:1179693 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178059:1179693 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1178059:1179693 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178059:1179693 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178059:1179693 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1178059:1179693 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1178061:1178061 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1178061:1178061 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178061:1178061 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178061:1178061 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1178061:1178061 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1178061:1178061 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:1178056:1178056 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1178056:1178056 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178056:1178056 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178056:1178056 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1178056:1178056 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1178056:1178056 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1178061:1179695 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178061:1179695 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1178061:1179695 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178061:1179695 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178061:1179695 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1178061:1179695 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1178058:1178058 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1178058:1178058 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178058:1178058 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178058:1178058 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1178058:1178058 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1178058:1178058 [4] NCCL INFO NET/Plugin: Using internal network plugin. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1178058:1179697 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178058:1179697 [4] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1178058:1179697 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178058:1179697 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178058:1179697 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1178058:1179697 [4] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1178060:1178060 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1178060:1178060 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178060:1178060 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178060:1178060 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1178060:1178060 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1178060:1178060 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1178060:1179714 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178060:1179714 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1178060:1179714 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1178060:1179714 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1178060:1179714 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1178060:1179714 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1178058:1179697 [4] NCCL INFO ncclCommInitRank comm 0x562548496cf0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xb1efba728080df42 - Init START +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO ncclCommInitRank comm 0x5600b4a7bcc0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xb1efba728080df42 - Init START +ywang29-vrdb-test1-worker-0:1178059:1179693 [5] NCCL INFO ncclCommInitRank comm 0x561be1419e60 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xb1efba728080df42 - Init START +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO ncclCommInitRank comm 0x55f827852b40 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xb1efba728080df42 - Init START +ywang29-vrdb-test1-worker-0:1178060:1179714 [6] NCCL INFO ncclCommInitRank comm 0x5597034280d0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xb1efba728080df42 - Init START +ywang29-vrdb-test1-worker-0:1178061:1179695 [7] NCCL INFO ncclCommInitRank comm 0x55f470573150 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xb1efba728080df42 - Init START +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO ncclCommInitRank comm 0x55a683378c00 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xb1efba728080df42 - Init START +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO ncclCommInitRank comm 0x559e7d055750 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xb1efba728080df42 - Init START +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1178059:1179693 [5] 
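The bootstrap lines above are standard single-node NCCL initialization: with no InfiniBand and no external net plugin, all eight ranks rendezvous over TCP on eth0. A generic sketch of how such a process group is typically created with torch.distributed; this is illustrative only, not the project's launcher code (DeepSpeed performs the equivalent internally), and it assumes the usual torchrun environment variables (RANK, WORLD_SIZE, LOCAL_RANK, MASTER_ADDR, MASTER_PORT).

```python
import os
import torch
import torch.distributed as dist

def init_distributed() -> tuple[int, int]:
    # Matches the log: NCCL_SOCKET_IFNAME=eth restricts NCCL's socket
    # transport to the eth* interfaces.
    os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")
    # Rendezvous uses MASTER_ADDR/MASTER_PORT from the environment.
    dist.init_process_group(backend="nccl")
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    return dist.get_rank(), dist.get_world_size()

if __name__ == "__main__":
    rank, world_size = init_distributed()  # world_size == 8 in this run
    print(f"rank {rank}/{world_size} ready on cuda:{torch.cuda.current_device()}")
```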
[NCCL topology and channel setup elided: every rank reports "comm ... rank N nRanks 8 nNodes 1 localRanks 8 localRank N MNNVL 0"; 24 collective channels are built, each ring ordered "0 1 2 3 4 5 6 7" and each tree a simple chain (rank 2, for example, prints "Trees [0] 3/-1/-1->2->1 ..." identically across all 24 channels); "P2P Chunksize set to 524288"; every neighboring GPU pair is then connected in both directions on all channels "via P2P/CUMEM/read"; all ranks report "Connected all rings", "Connected all trees", "threadThresholds 8/8/64 | 64/8/64 | 512 | 512", and "24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer"; libnccl-tuner.so is not found, so the internal tuner plugin is used, and ncclCommInitRank reports Init COMPLETE (ranks 7, 5 and 6 in this portion of the log).]
+ywang29-vrdb-test1-worker-0:1178058:1179697 [4] NCCL INFO ncclCommInitRank comm 0x562548496cf0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xb1efba728080df42 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1178057:1179692 [3] NCCL INFO ncclCommInitRank comm 0x55a683378c00 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xb1efba728080df42 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1178056:1179696 [2] NCCL INFO ncclCommInitRank comm 0x5600b4a7bcc0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xb1efba728080df42 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1178055:1179694 [1] NCCL INFO ncclCommInitRank comm 0x55f827852b40 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xb1efba728080df42 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1178054:1179691 [0] NCCL INFO ncclCommInitRank comm 0x559e7d055750 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xb1efba728080df42 - Init COMPLETE +[2025-10-09 10:57:59,389] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 
'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 
'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
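The "scores" entries listed in the warning above are parameters that do not exist in the pretrained checkpoint, so Transformers initializes them fresh: the names show one score tensor per attention projection (q/k/v/o) and per MLP projection (gate/up/down) in every layer, while the projection weights themselves load normally. As a rough illustration only (this is not the repository's actual implementation; the class name, defaults, and zero initialization below are assumptions), such per-weight score tensors are commonly used to gate a linear layer's weights with a temperature-scaled sigmoid, i.e. a learned soft mask:

# Hypothetical sketch, not the actual training code: a linear layer whose
# extra "scores" parameter soft-masks its weights via a temperature-scaled
# sigmoid. Names, defaults, and initialization are illustrative guesses.
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    def __init__(self, in_features, out_features, bias=True, temperature=1.0):
        super().__init__(in_features, out_features, bias=bias)
        # One learnable score per weight entry; the pretrained checkpoint has
        # no such tensor, which is why loading reports it as newly initialized.
        self.scores = nn.Parameter(torch.zeros_like(self.weight))
        self.temperature = temperature

    def forward(self, x):
        # Soft mask in (0, 1); a lower temperature pushes it toward hard 0/1.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

Under this reading the warning is expected rather than a problem: only the score tensors start untrained, and the "You should probably TRAIN this model" notice simply reflects that those new parameters have not yet been fit to any task.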
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores',
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-09 10:58:01,230] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
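The `.scores` tensors flagged above as newly initialized are the supermask parameters this run trains on top of the frozen pretrained weights, which is why the loader warns about them: they exist in the model definition but not in the pretrained checkpoint. The module printout below lists the wrapped layers as `SupermaskLinearSparsity_SoftForward_Normal`. A minimal sketch of what such a soft-forward supermask linear layer plausibly does (illustrative only, not the project's implementation; the sigmoid-with-temperature scaling is an assumption based on the "soft" forward in the class name and the score means of ~3.0 logged further down):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Illustrative soft-forward supermask layer; not the project's code.

    A learnable `scores` tensor shadows the frozen weight matrix, and the
    forward pass rescales each weight by sigmoid(scores / temperature).
    These `scores` tensors are exactly the parameters the loader reports
    as newly initialized, since the pretrained checkpoint has no such keys.
    """

    def __init__(self, in_features, out_features, bias=True,
                 temperature=0.3, score_init=3.0):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # Per-tensor score means of ~3.0 appear in the "Pre-training init"
        # lines below; sigmoid(3.0 / 0.3) ~= 1.0, i.e. a near-dense mask at
        # the start of training (assumption about the exact scaling).
        self.scores = nn.Parameter(torch.full((out_features, in_features), score_init))
        self.weight.requires_grad = False   # base weights stay frozen
        if self.bias is not None:
            self.bias.requires_grad = False

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)
```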
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+
(post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-09 10:58:14,796 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-09 10:58:14,802 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 
4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 
parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters 
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1178059:1184674 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1178059:1184674 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1178055:1184672 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1]
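Just above the NCCL setup, the trainer logged `Total Parameters: 1283756736, Total Trainable Parameters: 359661568`. That trainable figure is consistent with training only the `.scores` tensors from the per-tensor listing; a quick back-of-the-envelope verification in Python (illustrative arithmetic, not part of the training code):

```python
# One decoder layer's score tensors, using the shapes from the module
# printout above (hidden 896, kv heads 128, MLP intermediate 4864):
attn = 896 * 896 + 896 * 128 + 896 * 128 + 896 * 896  # q + k + v + o = 1,835,008
mlp = 3 * (896 * 4864)                                 # gate + up + down = 13,074,432
conn = 1152 * 896 + 896 * 896                          # connector layers 0 and 2

total_trainable = 24 * (attn + mlp) + conn
assert total_trainable == 359_661_568                  # matches the logged total
print(total_trainable)
```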
2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1178055:1184672 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1178056:1184675 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1178056:1184675 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1178057:1184678 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1178057:1184678 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178056:1184675 [2] NCCL INFO 
Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178059:1184674 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178056:1184675 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178059:1184674 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178056:1184675 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178055:1184672 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178059:1184674 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178057:1184678 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178056:1184675 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178055:1184672 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178059:1184674 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178057:1184678 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178056:1184675 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178059:1184674 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178055:1184672 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+[~300 near-identical NCCL INFO lines condensed: each of the 8 local ranks opens P2P channels 00/0 through 23/0 to its ring neighbors in both directions (forward 0[0] -> 1[1], ..., 7[7] -> 0[0], then reverse 7[7] -> 6[6], ..., 1[1] -> 0[0]), all via P2P/CUMEM/read, and every rank then reports "Connected all rings"]
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+[matching "Connected all trees", threadThresholds, and channel-count lines for ranks 1-7 condensed]
+ywang29-vrdb-test1-worker-0:1178059:1184674 [5] NCCL INFO ncclCommInitRank comm 0x7fab7406b500 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x824a076d9f9e566c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1178055:1184672 [1] NCCL INFO ncclCommInitRank comm 0x7ef72c06ae70 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x824a076d9f9e566c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1178058:1184673 [4] NCCL INFO ncclCommInitRank comm 0x7ff5b806af40 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x824a076d9f9e566c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1178060:1184676 [6] NCCL INFO ncclCommInitRank comm 0x7f6b3406ae50 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x824a076d9f9e566c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1178054:1184671 [0] NCCL INFO ncclCommInitRank comm 0x7f7cc406af70 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x824a076d9f9e566c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1178057:1184678 [3] NCCL INFO ncclCommInitRank comm 0x7fe65006abf0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x824a076d9f9e566c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1178056:1184675 [2] NCCL INFO ncclCommInitRank comm 0x7f14f006a6f0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x824a076d9f9e566c - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1178061:1184677 [7] NCCL INFO ncclCommInitRank comm 0x7fb98406a3d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x824a076d9f9e566c - Init COMPLETE
+ 0%| | 1/520 [00:12<1:47:38, 12.44s/it] {'loss': 2.0453, 'grad_norm': 0.004834212432014062, 'learning_rate': 0.000625, 'epoch': 0.0}
+ 0%| | 1/520 [00:12<1:47:38, 12.44s/it]
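The learning_rate values logged above and below are consistent with a linear warmup over the first 16 of 520 optimizer steps to a peak of 0.01, followed by cosine decay: step 1's 0.000625 is 0.01/16, and step 100's 0.009330127018922194 equals 0.01 * 0.5 * (1 + cos(pi/6)). A minimal sketch that reproduces the logged values; the peak, warmup, and total-step constants are inferred from the logged schedule itself, not taken from the launch flags:

    import math

    PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 0.01, 16, 520  # inferred from the logged values

    def lr_at(step: int) -> float:
        """Linear warmup then cosine decay; `step` is 1-indexed as in the log."""
        if step <= WARMUP_STEPS:
            return PEAK_LR * step / WARMUP_STEPS
        progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
        return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

    assert abs(lr_at(1) - 0.000625) < 1e-9                # step 1, as logged
    assert abs(lr_at(17) - 0.00999990286465769) < 1e-9    # step 17, as logged
    assert abs(lr_at(100) - 0.009330127018922194) < 1e-9  # step 100, as logged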
2/520 [00:16<1:03:22, 7.34s/it] {'loss': 2.0549, 'grad_norm': 0.005249508764637722, 'learning_rate': 0.00125, 'epoch': 0.0} + 0%| | 2/520 [00:16<1:03:22, 7.34s/it] 1%| | 3/520 [00:19<49:07, 5.70s/it] {'loss': 2.1899, 'grad_norm': 0.006006929888946754, 'learning_rate': 0.001875, 'epoch': 0.01} + 1%| | 3/520 [00:19<49:07, 5.70s/it] 1%| | 4/520 [00:23<42:11, 4.91s/it] {'loss': 2.0656, 'grad_norm': 0.0049635417966976255, 'learning_rate': 0.0025, 'epoch': 0.01} + 1%| | 4/520 [00:23<42:11, 4.91s/it] 1%| | 5/520 [00:27<38:30, 4.49s/it] {'loss': 2.2333, 'grad_norm': 0.005481129057616417, 'learning_rate': 0.003125, 'epoch': 0.01} + 1%| | 5/520 [00:27<38:30, 4.49s/it] 1%| | 6/520 [00:31<35:56, 4.20s/it] {'loss': 1.6754, 'grad_norm': 0.0028032207059699495, 'learning_rate': 0.00375, 'epoch': 0.01} + 1%| | 6/520 [00:31<35:56, 4.20s/it] 1%|▏ | 7/520 [00:34<34:30, 4.04s/it] {'loss': 2.0776, 'grad_norm': 0.0054148012597949645, 'learning_rate': 0.004375, 'epoch': 0.01} + 1%|▏ | 7/520 [00:34<34:30, 4.04s/it] 2%|▏ | 8/520 [00:39<35:13, 4.13s/it] {'loss': 2.0541, 'grad_norm': 0.004571424377442317, 'learning_rate': 0.005, 'epoch': 0.02} + 2%|▏ | 8/520 [00:39<35:13, 4.13s/it] 2%|▏ | 9/520 [00:43<35:23, 4.16s/it] {'loss': 2.19, 'grad_norm': 0.005017996425635469, 'learning_rate': 0.005625, 'epoch': 0.02} + 2%|▏ | 9/520 [00:43<35:23, 4.16s/it] 2%|▏ | 10/520 [00:47<34:21, 4.04s/it] {'loss': 2.0841, 'grad_norm': 0.005625472534243473, 'learning_rate': 0.00625, 'epoch': 0.02} + 2%|▏ | 10/520 [00:47<34:21, 4.04s/it] 2%|▏ | 11/520 [00:50<33:50, 3.99s/it] {'loss': 2.0582, 'grad_norm': 0.0049570499802454576, 'learning_rate': 0.006875, 'epoch': 0.02} + 2%|▏ | 11/520 [00:50<33:50, 3.99s/it] 2%|▏ | 12/520 [00:54<33:04, 3.91s/it] {'loss': 1.879, 'grad_norm': 0.0043599823443547575, 'learning_rate': 0.0075, 'epoch': 0.02} + 2%|▏ | 12/520 [00:54<33:04, 3.91s/it][2025-10-09 10:59:18,571] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [00:59<34:13, 4.05s/it] {'loss': 2.0676, 'grad_norm': 0.004886461749790401, 'learning_rate': 0.008125, 'epoch': 0.03} + 2%|▎ | 13/520 [00:59<34:13, 4.05s/it] 3%|▎ | 14/520 [01:02<33:15, 3.94s/it] {'loss': 2.1081, 'grad_norm': 0.004984618966994318, 'learning_rate': 0.00875, 'epoch': 0.03} + 3%|▎ | 14/520 [01:02<33:15, 3.94s/it] 3%|▎ | 15/520 [01:06<32:37, 3.88s/it] {'loss': 1.7457, 'grad_norm': 0.002705432927166165, 'learning_rate': 0.009375, 'epoch': 0.03} + 3%|▎ | 15/520 [01:06<32:37, 3.88s/it] 3%|▎ | 16/520 [01:10<32:13, 3.84s/it] {'loss': 1.8915, 'grad_norm': 0.0041612684030986865, 'learning_rate': 0.01, 'epoch': 0.03} + 3%|▎ | 16/520 [01:10<32:13, 3.84s/it] 3%|▎ | 17/520 [01:13<32:02, 3.82s/it] {'loss': 2.1108, 'grad_norm': 0.004627573407223861, 'learning_rate': 0.00999990286465769, 'epoch': 0.03} + 3%|▎ | 17/520 [01:13<32:02, 3.82s/it] 3%|▎ | 18/520 [01:17<32:15, 3.86s/it] {'loss': 2.1647, 'grad_norm': 0.006087940472865743, 'learning_rate': 0.009999611462404873, 'epoch': 0.03} + 3%|▎ | 18/520 [01:17<32:15, 3.86s/it] 4%|▎ | 19/520 [01:21<32:18, 3.87s/it] {'loss': 1.6027, 'grad_norm': 0.0014035202165354475, 'learning_rate': 0.009999125804563733, 'epoch': 0.04} + 4%|▎ | 19/520 [01:21<32:18, 3.87s/it] 4%|▍ | 20/520 [01:25<32:25, 3.89s/it] {'loss': 1.7057, 'grad_norm': 0.0033901335841310825, 'learning_rate': 0.00999844591000408, 'epoch': 0.04} + 4%|▍ | 20/520 [01:25<32:25, 3.89s/it] 4%|▍ | 21/520 [01:29<31:58, 3.84s/it] {'loss': 1.5825, 'grad_norm': 0.0024003802201287513, 'learning_rate': 0.009997571805142639, 'epoch': 0.04} + 4%|▍ | 21/520 [01:29<31:58, 3.84s/it] 4%|▍ | 22/520 [01:33<31:34, 3.80s/it] {'loss': 1.6469, 'grad_norm': 0.0010921316890926778, 'learning_rate': 0.009996503523941993, 'epoch': 0.04} + 4%|▍ | 22/520 [01:33<31:34, 3.80s/it] 4%|▍ | 23/520 [01:36<31:24, 3.79s/it] {'loss': 1.5712, 'grad_norm': 0.0008977355970160862, 'learning_rate': 0.00999524110790929, 'epoch': 0.04} + 4%|▍ | 23/520 [01:36<31:24, 3.79s/it] 5%|▍ | 24/520 [01:40<31:08, 3.77s/it] {'loss': 1.4757, 'grad_norm': 0.0007913054730297956, 'learning_rate': 0.00999378460609461, 'epoch': 0.05} + 5%|▍ | 24/520 [01:40<31:08, 3.77s/it] 5%|▍ | 25/520 [01:44<30:51, 3.74s/it] {'loss': 1.6103, 'grad_norm': 0.0009006970401416891, 'learning_rate': 0.009992134075089084, 'epoch': 0.05} + 5%|▍ | 25/520 [01:44<30:51, 3.74s/it] 5%|▌ | 26/520 [01:48<30:49, 3.74s/it] {'loss': 1.4905, 'grad_norm': 0.0005922408199469874, 'learning_rate': 0.00999028957902266, 'epoch': 0.05} + 5%|▌ | 26/520 [01:48<30:49, 3.74s/it] 5%|▌ | 27/520 [01:51<30:26, 3.70s/it] {'loss': 1.427, 'grad_norm': 0.0005602296196518146, 'learning_rate': 0.009988251189561644, 'epoch': 0.05} + 5%|▌ | 27/520 [01:51<30:26, 3.70s/it] 5%|▌ | 28/520 [01:55<30:16, 3.69s/it] {'loss': 1.4663, 'grad_norm': 0.0005686029210178931, 'learning_rate': 0.0099860189859059, 'epoch': 0.05} + 5%|▌ | 28/520 [01:55<30:16, 3.69s/it] 6%|▌ | 29/520 [01:59<30:10, 3.69s/it] {'loss': 1.4574, 'grad_norm': 0.0005053999509975156, 'learning_rate': 0.009983593054785776, 'epoch': 0.06} + 6%|▌ | 29/520 [01:59<30:10, 3.69s/it] 6%|▌ | 30/520 [02:02<30:03, 3.68s/it] {'loss': 1.5057, 'grad_norm': 0.0004203313290252519, 'learning_rate': 0.009980973490458728, 'epoch': 0.06} + 6%|▌ | 30/520 [02:02<30:03, 3.68s/it] 6%|▌ | 31/520 [02:06<29:56, 3.67s/it] {'loss': 1.4268, 'grad_norm': 0.00043958427692987863, 
'learning_rate': 0.009978160394705669, 'epoch': 0.06} + 6%|▌ | 31/520 [02:06<29:56, 3.67s/it] 6%|▌ | 32/520 [02:10<29:52, 3.67s/it] {'loss': 1.2892, 'grad_norm': 0.0003765177391262753, 'learning_rate': 0.009975153876827007, 'epoch': 0.06} + 6%|▌ | 32/520 [02:10<29:52, 3.67s/it] 6%|▋ | 33/520 [02:13<29:48, 3.67s/it] {'loss': 1.4434, 'grad_norm': 0.0004844040022710791, 'learning_rate': 0.0099719540536384, 'epoch': 0.06} + 6%|▋ | 33/520 [02:13<29:48, 3.67s/it] 7%|▋ | 34/520 [02:17<29:58, 3.70s/it] {'loss': 1.4304, 'grad_norm': 0.0004902998013971571, 'learning_rate': 0.009968561049466213, 'epoch': 0.07} + 7%|▋ | 34/520 [02:17<29:58, 3.70s/it] 7%|▋ | 35/520 [02:21<29:43, 3.68s/it] {'loss': 1.4278, 'grad_norm': 0.00046234193438404535, 'learning_rate': 0.009964974996142698, 'epoch': 0.07} + 7%|▋ | 35/520 [02:21<29:43, 3.68s/it] 7%|▋ | 36/520 [02:24<29:32, 3.66s/it] {'loss': 1.5164, 'grad_norm': 0.0004209617165246261, 'learning_rate': 0.009961196033000861, 'epoch': 0.07} + 7%|▋ | 36/520 [02:24<29:32, 3.66s/it] 7%|▋ | 37/520 [02:28<29:28, 3.66s/it] {'loss': 1.4622, 'grad_norm': 0.0003426223139868316, 'learning_rate': 0.009957224306869053, 'epoch': 0.07} + 7%|▋ | 37/520 [02:28<29:28, 3.66s/it] 7%|▋ | 38/520 [02:32<29:20, 3.65s/it] {'loss': 1.5762, 'grad_norm': 0.0003876630067694548, 'learning_rate': 0.009953059972065264, 'epoch': 0.07} + 7%|▋ | 38/520 [02:32<29:20, 3.65s/it] 8%|▊ | 39/520 [02:35<29:17, 3.65s/it] {'loss': 1.4821, 'grad_norm': 0.00047297224373102776, 'learning_rate': 0.009948703190391131, 'epoch': 0.07} + 8%|▊ | 39/520 [02:35<29:17, 3.65s/it] 8%|▊ | 40/520 [02:39<29:15, 3.66s/it] {'loss': 1.4743, 'grad_norm': 0.0003549500047931743, 'learning_rate': 0.009944154131125642, 'epoch': 0.08} + 8%|▊ | 40/520 [02:39<29:15, 3.66s/it] 8%|▊ | 41/520 [02:43<29:22, 3.68s/it] {'loss': 1.4653, 'grad_norm': 0.00038652947724416675, 'learning_rate': 0.009939412971018573, 'epoch': 0.08} + 8%|▊ | 41/520 [02:43<29:22, 3.68s/it] 8%|▊ | 42/520 [02:46<29:41, 3.73s/it] {'loss': 1.4607, 'grad_norm': 0.0004282464679388294, 'learning_rate': 0.009934479894283605, 'epoch': 0.08} + 8%|▊ | 42/520 [02:46<29:41, 3.73s/it] 8%|▊ | 43/520 [02:50<30:00, 3.77s/it] {'loss': 1.3608, 'grad_norm': 0.0003449699080005181, 'learning_rate': 0.009929355092591179, 'epoch': 0.08} + 8%|▊ | 43/520 [02:50<30:00, 3.77s/it] 8%|▊ | 44/520 [02:54<30:16, 3.82s/it] {'loss': 1.4593, 'grad_norm': 0.00034262578065003614, 'learning_rate': 0.00992403876506104, 'epoch': 0.08} + 8%|▊ | 44/520 [02:54<30:16, 3.82s/it] 9%|▊ | 45/520 [02:58<30:26, 3.84s/it] {'loss': 1.4828, 'grad_norm': 0.0003925653906415374, 'learning_rate': 0.009918531118254506, 'epoch': 0.09} + 9%|▊ | 45/520 [02:58<30:26, 3.84s/it] 9%|▉ | 46/520 [03:02<30:24, 3.85s/it] {'loss': 1.4804, 'grad_norm': 0.00030616517662849504, 'learning_rate': 0.009912832366166443, 'epoch': 0.09} + 9%|▉ | 46/520 [03:02<30:24, 3.85s/it] 9%|▉ | 47/520 [03:06<30:20, 3.85s/it] {'loss': 1.4444, 'grad_norm': 0.0003647645610376994, 'learning_rate': 0.00990694273021694, 'epoch': 0.09} + 9%|▉ | 47/520 [03:06<30:20, 3.85s/it] 9%|▉ | 48/520 [03:10<30:22, 3.86s/it] {'loss': 1.4812, 'grad_norm': 0.0004173076658546845, 'learning_rate': 0.00990086243924272, 'epoch': 0.09} + 9%|▉ | 48/520 [03:10<30:22, 3.86s/it] 9%|▉ | 49/520 [03:14<30:13, 3.85s/it] {'loss': 1.4908, 'grad_norm': 0.0003596010802686445, 'learning_rate': 0.009894591729488242, 'epoch': 0.09} + 9%|▉ | 49/520 [03:14<30:13, 3.85s/it] 10%|▉ | 50/520 [03:17<30:13, 3.86s/it] {'loss': 1.4927, 'grad_norm': 0.00036842340895544876, 'learning_rate': 
0.009888130844596523, 'epoch': 0.1} + 10%|▉ | 50/520 [03:17<30:13, 3.86s/it] 10%|▉ | 51/520 [03:21<29:59, 3.84s/it] {'loss': 1.4198, 'grad_norm': 0.0003791364474173965, 'learning_rate': 0.009881480035599667, 'epoch': 0.1} + 10%|▉ | 51/520 [03:21<29:59, 3.84s/it] 10%|█ | 52/520 [03:25<29:54, 3.83s/it] {'loss': 1.5442, 'grad_norm': 0.0003517537622093855, 'learning_rate': 0.009874639560909117, 'epoch': 0.1} + 10%|█ | 52/520 [03:25<29:54, 3.83s/it] 10%|█ | 53/520 [03:29<29:50, 3.83s/it] {'loss': 1.5152, 'grad_norm': 0.0003421965977857088, 'learning_rate': 0.009867609686305616, 'epoch': 0.1} + 10%|█ | 53/520 [03:29<29:50, 3.83s/it] 10%|█ | 54/520 [03:33<29:46, 3.83s/it] {'loss': 1.46, 'grad_norm': 0.0003382649478210031, 'learning_rate': 0.009860390684928872, 'epoch': 0.1} + 10%|█ | 54/520 [03:33<29:46, 3.83s/it] 11%|█ | 55/520 [03:37<29:38, 3.83s/it] {'loss': 1.4047, 'grad_norm': 0.0003560319295070763, 'learning_rate': 0.009852982837266955, 'epoch': 0.11} + 11%|█ | 55/520 [03:37<29:38, 3.83s/it] 11%|█ | 56/520 [03:40<29:33, 3.82s/it] {'loss': 1.5286, 'grad_norm': 0.0003419486948174586, 'learning_rate': 0.00984538643114539, 'epoch': 0.11} + 11%|█ | 56/520 [03:40<29:33, 3.82s/it] 11%|█ | 57/520 [03:44<29:36, 3.84s/it] {'loss': 1.4323, 'grad_norm': 0.000364810623111462, 'learning_rate': 0.009837601761715982, 'epoch': 0.11} + 11%|█ | 57/520 [03:44<29:36, 3.84s/it] 11%|█ | 58/520 [03:48<29:34, 3.84s/it] {'loss': 1.5251, 'grad_norm': 0.0002903423161480012, 'learning_rate': 0.009829629131445342, 'epoch': 0.11} + 11%|█ | 58/520 [03:48<29:34, 3.84s/it] 11%|█▏ | 59/520 [03:52<29:21, 3.82s/it] {'loss': 1.344, 'grad_norm': 0.0003109058696289027, 'learning_rate': 0.009821468850103139, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:52<29:21, 3.82s/it] 12%|█▏ | 60/520 [03:56<29:32, 3.85s/it] {'loss': 1.4481, 'grad_norm': 0.0003143473178214141, 'learning_rate': 0.009813121234750059, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:56<29:32, 3.85s/it] 12%|█▏ | 61/520 [03:59<29:11, 3.82s/it] {'loss': 1.4074, 'grad_norm': 0.00032991829874899826, 'learning_rate': 0.009804586609725498, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:59<29:11, 3.82s/it] 12%|█▏ | 62/520 [04:03<28:41, 3.76s/it] {'loss': 1.4304, 'grad_norm': 0.0003320202514240811, 'learning_rate': 0.009795865306634939, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:03<28:41, 3.76s/it] 12%|█▏ | 63/520 [04:07<28:32, 3.75s/it] {'loss': 1.4413, 'grad_norm': 0.0003064641732400249, 'learning_rate': 0.009786957664337091, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:07<28:32, 3.75s/it] 12%|█▏ | 64/520 [04:10<28:16, 3.72s/it] {'loss': 1.4643, 'grad_norm': 0.0003116798742817919, 'learning_rate': 0.009777864028930704, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:10<28:16, 3.72s/it] 12%|█▎ | 65/520 [04:14<28:07, 3.71s/it] {'loss': 1.4758, 'grad_norm': 0.0003474242032317868, 'learning_rate': 0.009768584753741134, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:14<28:07, 3.71s/it] 13%|█▎ | 66/520 [04:18<27:57, 3.69s/it] {'loss': 1.4044, 'grad_norm': 0.0003103746369524356, 'learning_rate': 0.009759120199306612, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:18<27:57, 3.69s/it] 13%|█▎ | 67/520 [04:21<27:50, 3.69s/it] {'loss': 1.3355, 'grad_norm': 0.00030868556553699435, 'learning_rate': 0.00974947073336423, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:21<27:50, 3.69s/it] 13%|█▎ | 68/520 [04:25<27:58, 3.71s/it] {'loss': 1.4041, 'grad_norm': 0.0003008752514306429, 'learning_rate': 0.009739636730835659, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:25<27:58, 3.71s/it] 13%|█▎ | 69/520 [04:29<27:55, 3.71s/it] {'loss': 1.3979, 'grad_norm': 0.0003457740866236928, 
'learning_rate': 0.00972961857381258, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:29<27:55, 3.71s/it] 13%|█▎ | 70/520 [04:33<27:42, 3.69s/it] {'loss': 1.3966, 'grad_norm': 0.0002987014924633415, 'learning_rate': 0.009719416651541838, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:33<27:42, 3.69s/it] 14%|█▎ | 71/520 [04:36<27:43, 3.71s/it] {'loss': 1.3528, 'grad_norm': 0.0003032944784030942, 'learning_rate': 0.009709031360410318, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:36<27:43, 3.71s/it] 14%|█▍ | 72/520 [04:40<27:29, 3.68s/it] {'loss': 1.5072, 'grad_norm': 0.00032962923109376187, 'learning_rate': 0.009698463103929543, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:40<27:29, 3.68s/it] 14%|█▍ | 73/520 [04:44<27:33, 3.70s/it] {'loss': 1.316, 'grad_norm': 0.00030162636663080627, 'learning_rate': 0.009687712292719997, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:44<27:33, 3.70s/it] 14%|█▍ | 74/520 [04:47<27:28, 3.70s/it] {'loss': 1.4219, 'grad_norm': 0.0003362810898083741, 'learning_rate': 0.00967677934449517, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:47<27:28, 3.70s/it] 14%|█▍ | 75/520 [04:51<27:18, 3.68s/it] {'loss': 1.3463, 'grad_norm': 0.0002704203895672372, 'learning_rate': 0.009665664684045332, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:51<27:18, 3.68s/it] 15%|█▍ | 76/520 [04:55<27:31, 3.72s/it] {'loss': 1.4251, 'grad_norm': 0.0002565088415913162, 'learning_rate': 0.009654368743221021, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:55<27:31, 3.72s/it] 15%|█▍ | 77/520 [04:59<27:46, 3.76s/it] {'loss': 1.3013, 'grad_norm': 0.00035403994686607985, 'learning_rate': 0.009642891960916267, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:59<27:46, 3.76s/it] 15%|█▌ | 78/520 [05:03<27:57, 3.79s/it] {'loss': 1.3807, 'grad_norm': 0.00030882015869900935, 'learning_rate': 0.009631234783051543, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:03<27:57, 3.79s/it] 15%|█▌ | 79/520 [05:06<27:56, 3.80s/it] {'loss': 1.3576, 'grad_norm': 0.00029058353471458133, 'learning_rate': 0.009619397662556433, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:06<27:56, 3.80s/it] 15%|█▌ | 80/520 [05:10<27:57, 3.81s/it] {'loss': 1.4097, 'grad_norm': 0.00029708471760726417, 'learning_rate': 0.009607381059352039, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:10<27:57, 3.81s/it] 16%|█▌ | 81/520 [05:14<27:54, 3.81s/it] {'loss': 1.5238, 'grad_norm': 0.0003756120084825822, 'learning_rate': 0.009595185440333102, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:14<27:54, 3.81s/it] 16%|█▌ | 82/520 [05:18<27:53, 3.82s/it] {'loss': 1.4343, 'grad_norm': 0.00030493952921845546, 'learning_rate': 0.009582811279349882, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:18<27:53, 3.82s/it] 16%|█▌ | 83/520 [05:22<27:27, 3.77s/it] {'loss': 1.4277, 'grad_norm': 0.00030544689790448006, 'learning_rate': 0.009570259057189716, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:22<27:27, 3.77s/it] 16%|█▌ | 84/520 [05:25<27:14, 3.75s/it] {'loss': 1.4572, 'grad_norm': 0.00031971212739107544, 'learning_rate': 0.009557529261558366, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:25<27:14, 3.75s/it] 16%|█▋ | 85/520 [05:29<27:02, 3.73s/it] {'loss': 1.5066, 'grad_norm': 0.0002805412591297277, 'learning_rate': 0.009544622387061054, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:29<27:02, 3.73s/it] 17%|█▋ | 86/520 [05:33<26:56, 3.73s/it] {'loss': 1.4757, 'grad_norm': 0.0002909981110631255, 'learning_rate': 0.009531538935183251, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:33<26:56, 3.73s/it] 17%|█▋ | 87/520 [05:36<26:58, 3.74s/it] {'loss': 1.3741, 'grad_norm': 0.00027744714027783254, 'learning_rate': 0.009518279414271184, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:36<26:58, 3.74s/it] 17%|█▋ | 88/520 [05:40<26:47, 3.72s/it] {'loss': 
1.2618, 'grad_norm': 0.00024920506094313716, 'learning_rate': 0.009504844339512096, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:40<26:47, 3.72s/it] 17%|█▋ | 89/520 [05:44<26:34, 3.70s/it] {'loss': 1.446, 'grad_norm': 0.00031715937797479314, 'learning_rate': 0.00949123423291422, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:44<26:34, 3.70s/it] 17%|█▋ | 90/520 [05:47<26:24, 3.69s/it] {'loss': 1.387, 'grad_norm': 0.0003177625518240911, 'learning_rate': 0.009477449623286505, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:47<26:24, 3.69s/it] 18%|█▊ | 91/520 [05:51<26:17, 3.68s/it] {'loss': 1.43, 'grad_norm': 0.0002754026363617, 'learning_rate': 0.009463491046218058, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:51<26:17, 3.68s/it] 18%|█▊ | 92/520 [05:55<26:18, 3.69s/it] {'loss': 1.3711, 'grad_norm': 0.00031349242816472775, 'learning_rate': 0.009449359044057344, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:55<26:18, 3.69s/it] 18%|█▊ | 93/520 [05:58<26:12, 3.68s/it] {'loss': 1.3958, 'grad_norm': 0.0003173866281736024, 'learning_rate': 0.009435054165891108, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:58<26:12, 3.68s/it] 18%|█▊ | 94/520 [06:02<25:58, 3.66s/it] {'loss': 1.4723, 'grad_norm': 0.0003743833333987811, 'learning_rate': 0.00942057696752305, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:02<25:58, 3.66s/it] 18%|█▊ | 95/520 [06:06<26:03, 3.68s/it] {'loss': 1.4046, 'grad_norm': 0.00034491294586497614, 'learning_rate': 0.009405928011452212, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:06<26:03, 3.68s/it] 18%|█▊ | 96/520 [06:10<26:04, 3.69s/it] {'loss': 1.3925, 'grad_norm': 0.00030405259267083767, 'learning_rate': 0.009391107866851142, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:10<26:04, 3.69s/it] 19%|█▊ | 97/520 [06:13<25:59, 3.69s/it] {'loss': 1.386, 'grad_norm': 0.0003564049443705761, 'learning_rate': 0.00937611710954377, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:13<25:59, 3.69s/it] 19%|█▉ | 98/520 [06:17<25:56, 3.69s/it] {'loss': 1.361, 'grad_norm': 0.00025997659827865636, 'learning_rate': 0.009360956321983027, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:17<25:56, 3.69s/it] 19%|█▉ | 99/520 [06:21<25:50, 3.68s/it] {'loss': 1.3656, 'grad_norm': 0.00031454559233462683, 'learning_rate': 0.009345626093228232, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:21<25:50, 3.68s/it] 19%|█▉ | 100/520 [06:24<25:46, 3.68s/it] {'loss': 1.3215, 'grad_norm': 0.00027297412958652515, 'learning_rate': 0.009330127018922194, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:24<25:46, 3.68s/it] 19%|█▉ | 101/520 [06:28<25:38, 3.67s/it] {'loss': 1.3952, 'grad_norm': 0.00030314629540563074, 'learning_rate': 0.009314459701268065, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:28<25:38, 3.67s/it] 20%|█▉ | 102/520 [06:31<25:28, 3.66s/it] {'loss': 1.4202, 'grad_norm': 0.00033581353067144235, 'learning_rate': 0.009298624749005951, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:32<25:28, 3.66s/it] 20%|█▉ | 103/520 [06:35<25:29, 3.67s/it] {'loss': 1.3272, 'grad_norm': 0.0002966098213821895, 'learning_rate': 0.009282622777389258, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:35<25:29, 3.67s/it] 20%|██ | 104/520 [06:39<25:25, 3.67s/it] {'loss': 1.419, 'grad_norm': 0.0003110550500252251, 'learning_rate': 0.009266454408160778, 'epoch': 0.2} + 20%|██ | 104/520 [06:39<25:25, 3.67s/it] 20%|██ | 105/520 [06:42<25:15, 3.65s/it] {'loss': 1.3877, 'grad_norm': 0.0002785716485802124, 'learning_rate': 0.009250120269528546, 'epoch': 0.2} + 20%|██ | 105/520 [06:42<25:15, 3.65s/it] 20%|██ | 106/520 [06:46<25:19, 3.67s/it] {'loss': 1.3364, 'grad_norm': 0.00025057197974221, 'learning_rate': 0.00923362099614142, 'epoch': 0.2} + 20%|██ | 106/520 [06:46<25:19, 3.67s/it] 21%|██ | 
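On the stage3.py warning logged around step 12 ("1 pytorch allocator cache flushes since last step"): the message itself recommends flushing the allocator cache on all ranks at the same point. A minimal sketch of that recommendation, assuming a standard DeepSpeed engine loop; `model_engine`, `dataloader`, and the flush cadence are illustrative, not taken from this run:

    from deepspeed.accelerator import get_accelerator

    for step, batch in enumerate(dataloader):
        loss = model_engine(batch)    # forward
        model_engine.backward(loss)   # backward with ZeRO stage-3 partitioning
        model_engine.step()           # optimizer step
        if step % 50 == 0:
            # flush the CUDA caching allocator on every rank together,
            # as the warning suggests, so ranks do not stall one another
            get_accelerator().empty_cache()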
107/520 [06:50<25:17, 3.67s/it] {'loss': 1.3341, 'grad_norm': 0.0002900106508833249, 'learning_rate': 0.009216957229064428, 'epoch': 0.21} + 21%|██ | 107/520 [06:50<25:17, 3.67s/it] 21%|██ | 108/520 [06:54<25:11, 3.67s/it] {'loss': 1.363, 'grad_norm': 0.0003184552161773686, 'learning_rate': 0.009200129615753859, 'epoch': 0.21} + 21%|██ | 108/520 [06:54<25:11, 3.67s/it] 21%|██ | 109/520 [06:57<25:07, 3.67s/it] {'loss': 1.2901, 'grad_norm': 0.0002581412669068385, 'learning_rate': 0.009183138810032099, 'epoch': 0.21} + 21%|██ | 109/520 [06:57<25:07, 3.67s/it] 21%|██ | 110/520 [07:01<24:56, 3.65s/it] {'loss': 1.514, 'grad_norm': 0.0003004256281358265, 'learning_rate': 0.009165985472062245, 'epoch': 0.21} + 21%|██ | 110/520 [07:01<24:56, 3.65s/it] 21%|██▏ | 111/520 [07:04<24:51, 3.65s/it] {'loss': 1.5271, 'grad_norm': 0.0003116638397435318, 'learning_rate': 0.009148670268322438, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:04<24:51, 3.65s/it] 22%|██▏ | 112/520 [07:08<24:55, 3.67s/it] {'loss': 1.3836, 'grad_norm': 0.00027670999992293274, 'learning_rate': 0.009131193871579974, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:08<24:55, 3.67s/it] 22%|██▏ | 113/520 [07:12<24:51, 3.67s/it] {'loss': 1.3088, 'grad_norm': 0.00026973173919765057, 'learning_rate': 0.009113556960865167, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:12<24:51, 3.67s/it] 22%|██▏ | 114/520 [07:16<24:59, 3.69s/it] {'loss': 1.4158, 'grad_norm': 0.00030974850778069254, 'learning_rate': 0.00909576022144496, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:16<24:59, 3.69s/it] 22%|██▏ | 115/520 [07:19<24:51, 3.68s/it] {'loss': 1.469, 'grad_norm': 0.00032338448433685054, 'learning_rate': 0.009077804344796301, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:19<24:51, 3.68s/it] 22%|██▏ | 116/520 [07:23<24:42, 3.67s/it] {'loss': 1.4916, 'grad_norm': 0.00029140217089090046, 'learning_rate': 0.009059690028579283, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:23<24:42, 3.67s/it] 22%|██▎ | 117/520 [07:27<24:46, 3.69s/it] {'loss': 1.4599, 'grad_norm': 0.00031662329775732816, 'learning_rate': 0.009041417976610027, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:27<24:46, 3.69s/it] 23%|██▎ | 118/520 [07:30<24:38, 3.68s/it] {'loss': 1.4082, 'grad_norm': 0.00028954760872082225, 'learning_rate': 0.009022988898833342, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:30<24:38, 3.68s/it] 23%|██▎ | 119/520 [07:34<24:37, 3.68s/it] {'loss': 1.3352, 'grad_norm': 0.00029204469963468844, 'learning_rate': 0.009004403511295141, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:34<24:37, 3.68s/it] 23%|██▎ | 120/520 [07:38<24:38, 3.70s/it] {'loss': 1.3722, 'grad_norm': 0.0003280795629176708, 'learning_rate': 0.008985662536114613, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:38<24:38, 3.70s/it] 23%|██▎ | 121/520 [07:41<24:29, 3.68s/it] {'loss': 1.4074, 'grad_norm': 0.0003152693550452412, 'learning_rate': 0.008966766701456175, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:41<24:29, 3.68s/it] 23%|██▎ | 122/520 [07:45<24:35, 3.71s/it] {'loss': 1.3362, 'grad_norm': 0.00030472425000484677, 'learning_rate': 0.008947716741501178, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:45<24:35, 3.71s/it] 24%|██▎ | 123/520 [07:49<24:31, 3.71s/it] {'loss': 1.3674, 'grad_norm': 0.0002700909473237068, 'learning_rate': 0.008928513396419369, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:49<24:31, 3.71s/it] 24%|██▍ | 124/520 [07:52<24:15, 3.68s/it] {'loss': 1.375, 'grad_norm': 0.000333831758336233, 'learning_rate': 0.008909157412340149, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:52<24:15, 3.68s/it] 24%|██▍ | 125/520 [07:56<24:17, 3.69s/it] {'loss': 1.372, 'grad_norm': 0.00031200749159208084, 
'learning_rate': 0.008889649541323574, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:56<24:17, 3.69s/it] 24%|██▍ | 126/520 [08:00<25:27, 3.88s/it] {'loss': 1.297, 'grad_norm': 0.0002626677703426251, 'learning_rate': 0.008869990541331137, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:00<25:27, 3.88s/it] 24%|██▍ | 127/520 [08:04<24:54, 3.80s/it] {'loss': 1.3762, 'grad_norm': 0.0003547610943078893, 'learning_rate': 0.008850181176196315, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:04<24:54, 3.80s/it] 25%|██▍ | 128/520 [08:08<24:35, 3.77s/it] {'loss': 1.3914, 'grad_norm': 0.00031904760415219814, 'learning_rate': 0.00883022221559489, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:08<24:35, 3.77s/it] 25%|██▍ | 129/520 [08:11<24:22, 3.74s/it] {'loss': 1.3733, 'grad_norm': 0.0002722579176487874, 'learning_rate': 0.008810114435015054, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:11<24:22, 3.74s/it] 25%|██▌ | 130/520 [08:15<24:19, 3.74s/it] {'loss': 1.3762, 'grad_norm': 0.00027204323764168505, 'learning_rate': 0.008789858615727265, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:15<24:19, 3.74s/it] 25%|██▌ | 131/520 [08:19<24:12, 3.73s/it] {'loss': 1.2645, 'grad_norm': 0.0002635227428866365, 'learning_rate': 0.008769455544753899, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:19<24:12, 3.73s/it] 25%|██▌ | 132/520 [08:23<24:04, 3.72s/it] {'loss': 1.4486, 'grad_norm': 0.00035223970724521786, 'learning_rate': 0.008748906014838671, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:23<24:04, 3.72s/it] 26%|██▌ | 133/520 [08:26<24:00, 3.72s/it] {'loss': 1.37, 'grad_norm': 0.0003773530855558947, 'learning_rate': 0.008728210824415827, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:26<24:00, 3.72s/it] 26%|██▌ | 134/520 [08:30<23:52, 3.71s/it] {'loss': 1.4272, 'grad_norm': 0.00031844518959112366, 'learning_rate': 0.008707370777579134, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:30<23:52, 3.71s/it] 26%|██▌ | 135/520 [08:34<23:43, 3.70s/it] {'loss': 1.4813, 'grad_norm': 0.0003101988255430541, 'learning_rate': 0.00868638668405062, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:34<23:43, 3.70s/it] 26%|██▌ | 136/520 [08:37<23:40, 3.70s/it] {'loss': 1.4423, 'grad_norm': 0.0003044772234625009, 'learning_rate': 0.008665259359149132, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:37<23:40, 3.70s/it] 26%|██▋ | 137/520 [08:41<23:38, 3.70s/it] {'loss': 1.3693, 'grad_norm': 0.0003582689556437689, 'learning_rate': 0.008643989623758643, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:41<23:38, 3.70s/it] 27%|██▋ | 138/520 [08:45<23:37, 3.71s/it] {'loss': 1.3621, 'grad_norm': 0.00027533824274512833, 'learning_rate': 0.008622578304296363, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:45<23:37, 3.71s/it] 27%|██▋ | 139/520 [08:48<23:30, 3.70s/it] {'loss': 1.1937, 'grad_norm': 0.0002845804653590749, 'learning_rate': 0.008601026232680633, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:48<23:30, 3.70s/it] 27%|██▋ | 140/520 [08:52<23:27, 3.70s/it] {'loss': 1.3275, 'grad_norm': 0.00024687586215659234, 'learning_rate': 0.008579334246298592, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:52<23:27, 3.70s/it] 27%|██▋ | 141/520 [08:56<23:33, 3.73s/it] {'loss': 1.437, 'grad_norm': 0.00028910398842709167, 'learning_rate': 0.008557503187973652, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:56<23:33, 3.73s/it] 27%|██▋ | 142/520 [09:00<23:26, 3.72s/it] {'loss': 1.3402, 'grad_norm': 0.00028670634861619555, 'learning_rate': 0.008535533905932738, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:00<23:26, 3.72s/it] 28%|██▊ | 143/520 [09:03<23:16, 3.70s/it] {'loss': 1.3698, 'grad_norm': 0.000331313329679509, 'learning_rate': 0.008513427253773347, 'epoch': 0.28} + 28%|██▊ | 143/520 
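The records in this region interleave bare tqdm redraws with metric dicts; to recover a clean per-step series from a log shaped like this one, a small parser is enough. A sketch under stated assumptions: the file name is illustrative, and the hard-coded 520 matches this run's step count:

    import re

    RECORD = re.compile(
        r"(\d+)/520 \[[^\]]*\] \{'loss': ([\d.]+), 'grad_norm': ([\d.e+-]+), "
        r"'learning_rate': ([\d.e+-]+), 'epoch': ([\d.]+)\}"
    )

    def parse_log(path="train.log"):
        """Return {step: {loss, grad_norm, lr, epoch}}; redraws without a dict are skipped."""
        with open(path, encoding="utf-8", errors="replace") as fh:
            text = fh.read()
        return {
            int(m.group(1)): {
                "loss": float(m.group(2)),
                "grad_norm": float(m.group(3)),
                "lr": float(m.group(4)),
                "epoch": float(m.group(5)),
            }
            for m in RECORD.finditer(text)
        }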
[09:03<23:16, 3.70s/it] 28%|██▊ | 144/520 [09:07<23:09, 3.70s/it] {'loss': 1.3901, 'grad_norm': 0.00035196166889077374, 'learning_rate': 0.008491184090430363, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:07<23:09, 3.70s/it] 28%|██▊ | 145/520 [09:11<23:00, 3.68s/it] {'loss': 1.2889, 'grad_norm': 0.0003343264736789693, 'learning_rate': 0.008468805280142709, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:11<23:00, 3.68s/it] 28%|██▊ | 146/520 [09:14<22:53, 3.67s/it] {'loss': 1.3868, 'grad_norm': 0.0003117908931380249, 'learning_rate': 0.008446291692419736, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:14<22:53, 3.67s/it] 28%|██▊ | 147/520 [09:18<22:54, 3.68s/it] {'loss': 1.3305, 'grad_norm': 0.00035530267615717175, 'learning_rate': 0.008423644202007467, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:18<22:54, 3.68s/it] 28%|██▊ | 148/520 [09:22<22:43, 3.67s/it] {'loss': 1.3763, 'grad_norm': 0.0003137483946060849, 'learning_rate': 0.008400863688854596, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:22<22:43, 3.67s/it] 29%|██▊ | 149/520 [09:25<22:39, 3.67s/it] {'loss': 1.3008, 'grad_norm': 0.0003352622726789807, 'learning_rate': 0.008377951038078302, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:25<22:39, 3.67s/it] 29%|██▉ | 150/520 [09:29<22:52, 3.71s/it] {'loss': 1.5065, 'grad_norm': 0.0003371424127403394, 'learning_rate': 0.00835490713992985, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:29<22:52, 3.71s/it] 29%|██▉ | 151/520 [09:33<22:59, 3.74s/it] {'loss': 1.36, 'grad_norm': 0.00033343805310944157, 'learning_rate': 0.00833173288976002, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:33<22:59, 3.74s/it] 29%|██▉ | 152/520 [09:37<23:01, 3.75s/it] {'loss': 1.3322, 'grad_norm': 0.00033768164606689204, 'learning_rate': 0.008308429187984297, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:37<23:01, 3.75s/it] 29%|██▉ | 153/520 [09:40<22:57, 3.75s/it] {'loss': 1.3526, 'grad_norm': 0.00032899866546174777, 'learning_rate': 0.008284996940047903, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:40<22:57, 3.75s/it] 30%|██▉ | 154/520 [09:44<23:01, 3.77s/it] {'loss': 1.4262, 'grad_norm': 0.0002993308120592219, 'learning_rate': 0.008261437056390606, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:44<23:01, 3.77s/it] 30%|██▉ | 155/520 [09:48<22:48, 3.75s/it] {'loss': 1.3485, 'grad_norm': 0.0003466947252758196, 'learning_rate': 0.008237750452411352, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:48<22:48, 3.75s/it] 30%|███ | 156/520 [09:52<22:42, 3.74s/it] {'loss': 1.3763, 'grad_norm': 0.00036263961219195126, 'learning_rate': 0.008213938048432696, 'epoch': 0.3} + 30%|███ | 156/520 [09:52<22:42, 3.74s/it] 30%|███ | 157/520 [09:55<22:32, 3.73s/it] {'loss': 1.392, 'grad_norm': 0.0003154344354663923, 'learning_rate': 0.008190000769665043, 'epoch': 0.3} + 30%|███ | 157/520 [09:55<22:32, 3.73s/it] 30%|███ | 158/520 [09:59<22:31, 3.73s/it] {'loss': 1.362, 'grad_norm': 0.0003081561623264214, 'learning_rate': 0.0081659395461707, 'epoch': 0.3} + 30%|███ | 158/520 [09:59<22:31, 3.73s/it] 31%|███ | 159/520 [10:03<22:17, 3.71s/it] {'loss': 1.4325, 'grad_norm': 0.0003338808127934969, 'learning_rate': 0.008141755312827736, 'epoch': 0.31} + 31%|███ | 159/520 [10:03<22:17, 3.71s/it] 31%|███ | 160/520 [10:07<22:23, 3.73s/it] {'loss': 1.4057, 'grad_norm': 0.00034750147713231473, 'learning_rate': 0.008117449009293669, 'epoch': 0.31} + 31%|███ | 160/520 [10:07<22:23, 3.73s/it] 31%|███ | 161/520 [10:10<22:12, 3.71s/it] {'loss': 1.4035, 'grad_norm': 0.00032983691946483307, 'learning_rate': 0.008093021579968942, 'epoch': 0.31} + 31%|███ | 161/520 [10:10<22:12, 3.71s/it] 31%|███ | 162/520 [10:14<22:09, 3.71s/it] {'loss': 1.3223, 
'grad_norm': 0.00030459944107250135, 'learning_rate': 0.008068473973960237, 'epoch': 0.31} + 31%|███ | 162/520 [10:14<22:09, 3.71s/it] 31%|███▏ | 163/520 [10:18<22:00, 3.70s/it] {'loss': 1.3089, 'grad_norm': 0.00041803119268556035, 'learning_rate': 0.008043807145043604, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:18<22:00, 3.70s/it] 32%|███▏ | 164/520 [10:21<21:57, 3.70s/it] {'loss': 1.2533, 'grad_norm': 0.0003355607602210277, 'learning_rate': 0.008019022051627387, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:21<21:57, 3.70s/it] 32%|███▏ | 165/520 [10:25<21:53, 3.70s/it] {'loss': 1.4367, 'grad_norm': 0.0003509025427757299, 'learning_rate': 0.007994119656715003, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:25<21:53, 3.70s/it] 32%|███▏ | 166/520 [10:29<21:44, 3.69s/it] {'loss': 1.3836, 'grad_norm': 0.00038396447987950396, 'learning_rate': 0.007969100927867508, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:29<21:44, 3.69s/it] 32%|███▏ | 167/520 [10:32<21:42, 3.69s/it] {'loss': 1.3665, 'grad_norm': 0.00035058755897307195, 'learning_rate': 0.007943966837166024, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:32<21:42, 3.69s/it] 32%|███▏ | 168/520 [10:36<21:41, 3.70s/it] {'loss': 1.3036, 'grad_norm': 0.00031687995484266717, 'learning_rate': 0.00791871836117395, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:36<21:41, 3.70s/it] 32%|███▎ | 169/520 [10:40<21:45, 3.72s/it] {'loss': 1.381, 'grad_norm': 0.0003257953671404042, 'learning_rate': 0.00789335648089903, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:40<21:45, 3.72s/it] 33%|███▎ | 170/520 [10:44<22:06, 3.79s/it] {'loss': 1.2741, 'grad_norm': 0.0002634637815066133, 'learning_rate': 0.00786788218175523, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:44<22:06, 3.79s/it] 33%|███▎ | 171/520 [10:48<22:17, 3.83s/it] {'loss': 1.3352, 'grad_norm': 0.00041707790882067574, 'learning_rate': 0.007842296453524463, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:48<22:17, 3.83s/it] 33%|███▎ | 172/520 [10:52<22:21, 3.86s/it] {'loss': 1.3883, 'grad_norm': 0.00034936959503147893, 'learning_rate': 0.00781660029031811, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:52<22:21, 3.86s/it] 33%|███▎ | 173/520 [10:56<22:26, 3.88s/it] {'loss': 1.3218, 'grad_norm': 0.00035150419423066545, 'learning_rate': 0.007790794690538421, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:56<22:26, 3.88s/it] 33%|███▎ | 174/520 [11:00<22:34, 3.91s/it] {'loss': 1.3802, 'grad_norm': 0.0003701833450929908, 'learning_rate': 0.007764880656839696, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:00<22:34, 3.91s/it] 34%|███▎ | 175/520 [11:04<22:30, 3.91s/it] {'loss': 1.2876, 'grad_norm': 0.00039883541190506214, 'learning_rate': 0.007738859196089357, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:04<22:30, 3.91s/it] 34%|███▍ | 176/520 [11:08<22:35, 3.94s/it] {'loss': 1.3365, 'grad_norm': 0.00035385762275764505, 'learning_rate': 0.007712731319328797, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:08<22:35, 3.94s/it] 34%|███▍ | 177/520 [11:11<22:23, 3.92s/it] {'loss': 1.2356, 'grad_norm': 0.00031740367871666177, 'learning_rate': 0.0076864980417341204, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:11<22:23, 3.92s/it] 34%|███▍ | 178/520 [11:15<22:19, 3.92s/it] {'loss': 1.3482, 'grad_norm': 0.0003593964039035514, 'learning_rate': 0.007660160382576683, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:15<22:19, 3.92s/it] 34%|███▍ | 179/520 [11:19<21:55, 3.86s/it] {'loss': 1.4337, 'grad_norm': 0.00040246435123247003, 'learning_rate': 0.007633719365183504, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:19<21:55, 3.86s/it] 35%|███▍ | 180/520 [11:23<21:31, 3.80s/it] {'loss': 1.3545, 'grad_norm': 0.00032439875551392794, 
'learning_rate': 0.007607176016897491, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:23<21:31, 3.80s/it] 35%|███▍ | 181/520 [11:26<21:14, 3.76s/it] {'loss': 1.3223, 'grad_norm': 0.0003046901850425839, 'learning_rate': 0.007580531369037533, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:26<21:14, 3.76s/it] 35%|███▌ | 182/520 [11:30<21:08, 3.75s/it] {'loss': 1.351, 'grad_norm': 0.00035713793941108467, 'learning_rate': 0.007553786456858429, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:30<21:08, 3.75s/it] 35%|███▌ | 183/520 [11:34<20:54, 3.72s/it] {'loss': 1.352, 'grad_norm': 0.00028966664191289936, 'learning_rate': 0.007526942319510655, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:34<20:54, 3.72s/it] 35%|███▌ | 184/520 [11:37<20:50, 3.72s/it] {'loss': 1.3502, 'grad_norm': 0.0004050505793170551, 'learning_rate': 0.0075, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:37<20:50, 3.72s/it] 36%|███▌ | 185/520 [11:41<20:42, 3.71s/it] {'loss': 1.4007, 'grad_norm': 0.0003497826418480238, 'learning_rate': 0.007472960545147038, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:41<20:42, 3.71s/it] 36%|███▌ | 186/520 [11:45<20:38, 3.71s/it] {'loss': 1.3542, 'grad_norm': 0.0003887637572564775, 'learning_rate': 0.0074458250055464475, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:45<20:38, 3.71s/it] 36%|███▌ | 187/520 [11:49<20:35, 3.71s/it] {'loss': 1.3434, 'grad_norm': 0.000489452230182375, 'learning_rate': 0.007418594435526199, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:49<20:35, 3.71s/it] 36%|███▌ | 188/520 [11:52<20:27, 3.70s/it] {'loss': 1.4377, 'grad_norm': 0.0004131832600702502, 'learning_rate': 0.007391269893106591, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:52<20:27, 3.70s/it] 36%|███▋ | 189/520 [11:56<20:24, 3.70s/it] {'loss': 1.4089, 'grad_norm': 0.00032091669037799075, 'learning_rate': 0.007363852439959136, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:56<20:24, 3.70s/it] 37%|███▋ | 190/520 [12:00<20:20, 3.70s/it] {'loss': 1.3385, 'grad_norm': 0.0004341766321454877, 'learning_rate': 0.0073363431413653105, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:00<20:20, 3.70s/it] 37%|███▋ | 191/520 [12:03<20:15, 3.70s/it] {'loss': 1.317, 'grad_norm': 0.00036243477870926885, 'learning_rate': 0.007308743066175171, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:03<20:15, 3.70s/it] 37%|███▋ | 192/520 [12:07<20:16, 3.71s/it] {'loss': 1.3664, 'grad_norm': 0.0003221682922391749, 'learning_rate': 0.007281053286765815, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:07<20:16, 3.71s/it] 37%|███▋ | 193/520 [12:11<20:12, 3.71s/it] {'loss': 1.2932, 'grad_norm': 0.00035543101868521334, 'learning_rate': 0.007253274878999727, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:11<20:12, 3.71s/it] 37%|███▋ | 194/520 [12:15<20:15, 3.73s/it] {'loss': 1.1867, 'grad_norm': 0.00039202588130643246, 'learning_rate': 0.007225408922182961, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:15<20:15, 3.73s/it] 38%|███▊ | 195/520 [12:18<20:06, 3.71s/it] {'loss': 1.372, 'grad_norm': 0.0003072074945910208, 'learning_rate': 0.007197456499023225, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:18<20:06, 3.71s/it] 38%|███▊ | 196/520 [12:22<19:54, 3.69s/it] {'loss': 1.3984, 'grad_norm': 0.00037669170153229684, 'learning_rate': 0.007169418695587791, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:22<19:54, 3.69s/it] 38%|███▊ | 197/520 [12:26<20:02, 3.72s/it] {'loss': 1.3217, 'grad_norm': 0.00044871923098451026, 'learning_rate': 0.007141296601261313, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:26<20:02, 3.72s/it] 38%|███▊ | 198/520 [12:29<19:52, 3.70s/it] {'loss': 1.4145, 'grad_norm': 0.0003806738194147654, 'learning_rate': 0.007113091308703498, 'epoch': 0.38} 
+ 38%|███▊ | 198/520 [12:29<19:52, 3.70s/it] 38%|███▊ | 199/520 [12:33<19:45, 3.69s/it] {'loss': 1.3147, 'grad_norm': 0.0004343884686536732, 'learning_rate': 0.007084803913806641, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:33<19:45, 3.69s/it] 38%|███▊ | 200/520 [12:37<19:40, 3.69s/it] {'loss': 1.2462, 'grad_norm': 0.0003835968585801307, 'learning_rate': 0.007056435515653059, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:37<19:40, 3.69s/it] 39%|███▊ | 201/520 [12:40<19:36, 3.69s/it] {'loss': 1.2588, 'grad_norm': 0.00034887462120988496, 'learning_rate': 0.007027987216472376, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:40<19:36, 3.69s/it] 39%|███▉ | 202/520 [12:44<19:31, 3.69s/it] {'loss': 1.3546, 'grad_norm': 0.00038535538802375564, 'learning_rate': 0.006999460121598704, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:44<19:31, 3.69s/it] 39%|███▉ | 203/520 [12:48<19:34, 3.71s/it] {'loss': 1.3633, 'grad_norm': 0.0003659615809497967, 'learning_rate': 0.0069708553394276975, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:48<19:34, 3.71s/it] 39%|███▉ | 204/520 [12:51<19:26, 3.69s/it] {'loss': 1.3938, 'grad_norm': 0.00039728859055658793, 'learning_rate': 0.006942173981373474, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:51<19:26, 3.69s/it] 39%|███▉ | 205/520 [12:55<19:32, 3.72s/it] {'loss': 1.2606, 'grad_norm': 0.00032755962168575545, 'learning_rate': 0.00691341716182545, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:55<19:32, 3.72s/it] 40%|███▉ | 206/520 [12:59<19:22, 3.70s/it] {'loss': 1.393, 'grad_norm': 0.0004369836248336737, 'learning_rate': 0.006884585998105026, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:59<19:22, 3.70s/it] 40%|███▉ | 207/520 [13:03<19:17, 3.70s/it] {'loss': 1.2106, 'grad_norm': 0.0003089115907861448, 'learning_rate': 0.00685568161042219, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:03<19:17, 3.70s/it] 40%|████ | 208/520 [13:06<19:09, 3.69s/it] {'loss': 1.3994, 'grad_norm': 0.0005595133568363164, 'learning_rate': 0.0068267051218319766, 'epoch': 0.4} + 40%|████ | 208/520 [13:06<19:09, 3.69s/it] 40%|████ | 209/520 [13:10<19:04, 3.68s/it] {'loss': 1.3096, 'grad_norm': 0.00038136920554882563, 'learning_rate': 0.006797657658190838, 'epoch': 0.4} + 40%|████ | 209/520 [13:10<19:04, 3.68s/it] 40%|████ | 210/520 [13:14<19:00, 3.68s/it] {'loss': 1.3745, 'grad_norm': 0.00038007350638773077, 'learning_rate': 0.006768540348112907, 'epoch': 0.4} + 40%|████ | 210/520 [13:14<19:00, 3.68s/it] 41%|████ | 211/520 [13:17<18:56, 3.68s/it] {'loss': 1.3676, 'grad_norm': 0.00045444783480502634, 'learning_rate': 0.006739354322926136, 'epoch': 0.41} + 41%|████ | 211/520 [13:17<18:56, 3.68s/it] 41%|████ | 212/520 [13:21<18:51, 3.67s/it] {'loss': 1.3816, 'grad_norm': 0.0003942259383767208, 'learning_rate': 0.006710100716628344, 'epoch': 0.41} + 41%|████ | 212/520 [13:21<18:51, 3.67s/it] 41%|████ | 213/520 [13:25<18:47, 3.67s/it] {'loss': 1.3341, 'grad_norm': 0.0004204884701464466, 'learning_rate': 0.006680780665843155, 'epoch': 0.41} + 41%|████ | 213/520 [13:25<18:47, 3.67s/it] 41%|████ | 214/520 [13:28<18:44, 3.67s/it] {'loss': 1.3425, 'grad_norm': 0.0004382096255042692, 'learning_rate': 0.006651395309775836, 'epoch': 0.41} + 41%|████ | 214/520 [13:28<18:44, 3.67s/it] 41%|████▏ | 215/520 [13:32<18:37, 3.66s/it] {'loss': 1.2008, 'grad_norm': 0.0003648907727755857, 'learning_rate': 0.006621945790169036, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:32<18:37, 3.66s/it] 42%|████▏ | 216/520 [13:36<18:56, 3.74s/it] {'loss': 1.246, 'grad_norm': 0.00040950281560983695, 'learning_rate': 0.006592433251258423, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:36<18:56, 
3.74s/it] 42%|████▏ | 217/520 [13:40<18:48, 3.73s/it] {'loss': 1.3739, 'grad_norm': 0.0004468124072485261, 'learning_rate': 0.006562858839728223, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:40<18:48, 3.73s/it] 42%|████▏ | 218/520 [13:43<18:41, 3.71s/it] {'loss': 1.3554, 'grad_norm': 0.00049189234225366, 'learning_rate': 0.006533223704666672, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:43<18:41, 3.71s/it] 42%|████▏ | 219/520 [13:47<18:40, 3.72s/it] {'loss': 1.3744, 'grad_norm': 0.0003706648457845689, 'learning_rate': 0.006503528997521366, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:47<18:40, 3.72s/it] 42%|████▏ | 220/520 [13:51<18:39, 3.73s/it] {'loss': 1.2331, 'grad_norm': 0.0003778417564038689, 'learning_rate': 0.006473775872054522, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:51<18:39, 3.73s/it] 42%|████▎ | 221/520 [13:54<18:28, 3.71s/it] {'loss': 1.3637, 'grad_norm': 0.0004224933604375207, 'learning_rate': 0.00644396548429815, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:54<18:28, 3.71s/it] 43%|████▎ | 222/520 [13:58<18:20, 3.69s/it] {'loss': 1.296, 'grad_norm': 0.0003919395966455178, 'learning_rate': 0.006414098992509137, 'epoch': 0.43} + 43%|████▎ | 222/520 [13:58<18:20, 3.69s/it] 43%|████▎ | 223/520 [14:02<18:17, 3.70s/it] {'loss': 1.3132, 'grad_norm': 0.0004225404772906909, 'learning_rate': 0.0063841775571242465, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:02<18:17, 3.70s/it] 43%|████▎ | 224/520 [14:05<18:09, 3.68s/it] {'loss': 1.2592, 'grad_norm': 0.00036237160879320905, 'learning_rate': 0.006354202340715026, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:05<18:09, 3.68s/it] 43%|████▎ | 225/520 [14:09<18:08, 3.69s/it] {'loss': 1.3041, 'grad_norm': 0.0003875529852434301, 'learning_rate': 0.006324174507942636, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:09<18:08, 3.69s/it] 43%|████▎ | 226/520 [14:13<18:01, 3.68s/it] {'loss': 1.4353, 'grad_norm': 0.00046302458081862483, 'learning_rate': 0.006294095225512604, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:13<18:01, 3.68s/it] 44%|████▎ | 227/520 [14:17<18:14, 3.74s/it] {'loss': 1.3825, 'grad_norm': 0.0003854462182337265, 'learning_rate': 0.006263965662129487, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:17<18:14, 3.74s/it] 44%|████▍ | 228/520 [14:20<18:12, 3.74s/it] {'loss': 1.3233, 'grad_norm': 0.0003635536390802439, 'learning_rate': 0.006233786988451467, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:20<18:12, 3.74s/it] 44%|████▍ | 229/520 [14:24<18:02, 3.72s/it] {'loss': 1.3526, 'grad_norm': 0.00036889090738753666, 'learning_rate': 0.006203560377044865, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:24<18:02, 3.72s/it] 44%|████▍ | 230/520 [14:28<17:59, 3.72s/it] {'loss': 1.2622, 'grad_norm': 0.00041049067820861205, 'learning_rate': 0.006173287002338577, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:28<17:59, 3.72s/it] 44%|████▍ | 231/520 [14:31<17:57, 3.73s/it] {'loss': 1.3337, 'grad_norm': 0.00043476798214016696, 'learning_rate': 0.006142968040578448, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:31<17:57, 3.73s/it] 45%|████▍ | 232/520 [14:35<17:54, 3.73s/it] {'loss': 1.3687, 'grad_norm': 0.0003888370380261687, 'learning_rate': 0.006112604669781572, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:35<17:54, 3.73s/it] 45%|████▍ | 233/520 [14:39<17:48, 3.72s/it] {'loss': 1.2882, 'grad_norm': 0.00046433727760686924, 'learning_rate': 0.006082198069690514, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:39<17:48, 3.72s/it] 45%|████▌ | 234/520 [14:43<17:42, 3.71s/it] {'loss': 1.2772, 'grad_norm': 0.0005340820347428453, 'learning_rate': 0.00605174942172748, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:43<17:42, 
3.71s/it] 45%|████▌ | 235/520 [14:46<17:38, 3.71s/it] {'loss': 1.3495, 'grad_norm': 0.00047064438888274316, 'learning_rate': 0.006021259908948402, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:46<17:38, 3.71s/it] 45%|████▌ | 236/520 [14:50<17:36, 3.72s/it] {'loss': 1.3979, 'grad_norm': 0.0004758242548642513, 'learning_rate': 0.005990730715996988, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:50<17:36, 3.72s/it] 46%|████▌ | 237/520 [14:54<17:30, 3.71s/it] {'loss': 1.3949, 'grad_norm': 0.00042445973595646534, 'learning_rate': 0.005960163029058682, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:54<17:30, 3.71s/it] 46%|████▌ | 238/520 [14:57<17:23, 3.70s/it] {'loss': 1.3435, 'grad_norm': 0.0004380124364175028, 'learning_rate': 0.005929558035814574, 'epoch': 0.46} + 46%|████▌ | 238/520 [14:57<17:23, 3.70s/it] 46%|████▌ | 239/520 [15:01<17:25, 3.72s/it] {'loss': 1.3911, 'grad_norm': 0.0004281662893298545, 'learning_rate': 0.005898916925395264, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:01<17:25, 3.72s/it] 46%|████▌ | 240/520 [15:05<17:30, 3.75s/it] {'loss': 1.2326, 'grad_norm': 0.00047029701153531464, 'learning_rate': 0.005868240888334653, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:05<17:30, 3.75s/it] 46%|████▋ | 241/520 [15:09<17:30, 3.77s/it] {'loss': 1.3243, 'grad_norm': 0.0003929841346350455, 'learning_rate': 0.005837531116523682, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:09<17:30, 3.77s/it] 47%|████▋ | 242/520 [15:13<17:30, 3.78s/it] {'loss': 1.3071, 'grad_norm': 0.00035397718969306264, 'learning_rate': 0.005806788803164034, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:13<17:30, 3.78s/it] 47%|████▋ | 243/520 [15:16<17:25, 3.78s/it] {'loss': 1.3172, 'grad_norm': 0.0003815481948539133, 'learning_rate': 0.005776015142721758, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:16<17:25, 3.78s/it] 47%|████▋ | 244/520 [15:20<17:27, 3.79s/it] {'loss': 1.3845, 'grad_norm': 0.00044306353648814503, 'learning_rate': 0.005745211330880871, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:20<17:27, 3.79s/it] 47%|████▋ | 245/520 [15:24<17:17, 3.77s/it] {'loss': 1.3052, 'grad_norm': 0.00042727065387716535, 'learning_rate': 0.005714378564496901, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:24<17:17, 3.77s/it] 47%|████▋ | 246/520 [15:28<17:08, 3.75s/it] {'loss': 1.3567, 'grad_norm': 0.00039947985068158393, 'learning_rate': 0.0056835180415503676, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:28<17:08, 3.75s/it] 48%|████▊ | 247/520 [15:31<17:00, 3.74s/it] {'loss': 1.4659, 'grad_norm': 0.0004461420321612099, 'learning_rate': 0.005652630961100259, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:31<17:00, 3.74s/it] 48%|████▊ | 248/520 [15:35<16:58, 3.74s/it] {'loss': 1.2749, 'grad_norm': 0.00040846944370425233, 'learning_rate': 0.005621718523237427, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:35<16:58, 3.74s/it] 48%|████▊ | 249/520 [15:39<16:46, 3.72s/it] {'loss': 1.3704, 'grad_norm': 0.00044154668627393126, 'learning_rate': 0.005590781929037965, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:39<16:46, 3.72s/it] 48%|████▊ | 250/520 [15:42<16:38, 3.70s/it] {'loss': 1.3351, 'grad_norm': 0.0005104132863211012, 'learning_rate': 0.005559822380516539, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:42<16:38, 3.70s/it] 48%|████▊ | 251/520 [15:46<16:39, 3.72s/it] {'loss': 1.3897, 'grad_norm': 0.00039085696542362264, 'learning_rate': 0.005528841080579689, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:46<16:39, 3.72s/it] 48%|████▊ | 252/520 [15:50<16:48, 3.76s/it] {'loss': 1.2752, 'grad_norm': 0.0003728084577515678, 'learning_rate': 0.005497839232979084, 'epoch': 0.48} + 48%|████▊ | 252/520 
[15:50<16:48, 3.76s/it] 49%|████▊ | 253/520 [15:54<16:40, 3.75s/it] {'loss': 1.3618, 'grad_norm': 0.0004972779020569711, 'learning_rate': 0.0054668180422647525, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:54<16:40, 3.75s/it] 49%|████▉ | 254/520 [15:57<16:35, 3.74s/it] {'loss': 1.3198, 'grad_norm': 0.0003941946776885825, 'learning_rate': 0.005435778713738292, 'epoch': 0.49} + 49%|████▉ | 254/520 [15:57<16:35, 3.74s/it] 49%|████▉ | 255/520 [16:01<16:21, 3.70s/it] {'loss': 1.326, 'grad_norm': 0.0004908642590208214, 'learning_rate': 0.0054047224534060165, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:01<16:21, 3.70s/it] 49%|████▉ | 256/520 [16:05<16:11, 3.68s/it] {'loss': 1.3988, 'grad_norm': 0.00043462715052636203, 'learning_rate': 0.005373650467932122, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:05<16:11, 3.68s/it] 49%|████▉ | 257/520 [16:08<16:07, 3.68s/it] {'loss': 1.3609, 'grad_norm': 0.0004317149689611481, 'learning_rate': 0.005342563964591783, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:08<16:07, 3.68s/it] 50%|████▉ | 258/520 [16:12<16:00, 3.67s/it] {'loss': 1.3605, 'grad_norm': 0.0003788714942525103, 'learning_rate': 0.0053114641512242615, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:12<16:00, 3.67s/it] 50%|████▉ | 259/520 [16:16<16:01, 3.69s/it] {'loss': 1.4352, 'grad_norm': 0.0005268805201725431, 'learning_rate': 0.005280352236185959, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:16<16:01, 3.69s/it] 50%|█████ | 260/520 [16:20<16:09, 3.73s/it] {'loss': 1.3129, 'grad_norm': 0.0003342389717775317, 'learning_rate': 0.005249229428303486, 'epoch': 0.5} + 50%|█████ | 260/520 [16:20<16:09, 3.73s/it] 50%|█████ | 261/520 [16:23<16:07, 3.74s/it] {'loss': 1.2672, 'grad_norm': 0.0004003776844912218, 'learning_rate': 0.005218096936826681, 'epoch': 0.5} + 50%|█████ | 261/520 [16:23<16:07, 3.74s/it] 50%|█████ | 262/520 [16:27<15:56, 3.71s/it] {'loss': 1.3082, 'grad_norm': 0.0004705425514493166, 'learning_rate': 0.00518695597138163, 'epoch': 0.5} + 50%|█████ | 262/520 [16:27<15:56, 3.71s/it] 51%|█████ | 263/520 [16:31<15:54, 3.71s/it] {'loss': 1.2976, 'grad_norm': 0.00044094163212442895, 'learning_rate': 0.005155807741923666, 'epoch': 0.51} + 51%|█████ | 263/520 [16:31<15:54, 3.71s/it] 51%|█████ | 264/520 [16:35<15:57, 3.74s/it] {'loss': 1.381, 'grad_norm': 0.0004044124121256395, 'learning_rate': 0.005124653458690365, 'epoch': 0.51} + 51%|█████ | 264/520 [16:35<15:57, 3.74s/it] 51%|█████ | 265/520 [16:38<15:58, 3.76s/it] {'loss': 1.3468, 'grad_norm': 0.0005083248930589453, 'learning_rate': 0.005093494332154511, 'epoch': 0.51} + 51%|█████ | 265/520 [16:38<15:58, 3.76s/it] 51%|█████ | 266/520 [16:42<15:58, 3.77s/it] {'loss': 1.1999, 'grad_norm': 0.0004390973469708723, 'learning_rate': 0.005062331572977076, 'epoch': 0.51} + 51%|█████ | 266/520 [16:42<15:58, 3.77s/it] 51%|█████▏ | 267/520 [16:46<15:53, 3.77s/it] {'loss': 1.3092, 'grad_norm': 0.0005085700724176791, 'learning_rate': 0.005031166391960168, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:46<15:53, 3.77s/it] 52%|█████▏ | 268/520 [16:50<15:41, 3.73s/it] {'loss': 1.3992, 'grad_norm': 0.0005056131542124917, 'learning_rate': 0.005, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:50<15:41, 3.73s/it] 52%|█████▏ | 269/520 [16:53<15:35, 3.73s/it] {'loss': 1.4209, 'grad_norm': 0.00043355720353701254, 'learning_rate': 0.004968833608039832, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:53<15:35, 3.73s/it] 52%|█████▏ | 270/520 [16:57<15:28, 3.71s/it] {'loss': 1.2193, 'grad_norm': 0.0004131066045279778, 'learning_rate': 0.004937668427022925, 'epoch': 0.52} + 52%|█████▏ | 270/520 [16:57<15:28, 
3.71s/it] 52%|█████▏ | 271/520 [17:01<15:20, 3.70s/it] {'loss': 1.3935, 'grad_norm': 0.00046586850437596887, 'learning_rate': 0.00490650566784549, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:01<15:20, 3.70s/it] 52%|█████▏ | 272/520 [17:04<15:15, 3.69s/it] {'loss': 1.2499, 'grad_norm': 0.0004534750173790765, 'learning_rate': 0.004875346541309637, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:04<15:15, 3.69s/it] 52%|█████▎ | 273/520 [17:08<15:12, 3.69s/it] {'loss': 1.3214, 'grad_norm': 0.0004728368221059346, 'learning_rate': 0.004844192258076335, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:08<15:12, 3.69s/it] 53%|█████▎ | 274/520 [17:12<15:05, 3.68s/it] {'loss': 1.403, 'grad_norm': 0.000625807660939295, 'learning_rate': 0.004813044028618372, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:12<15:05, 3.68s/it] 53%|█████▎ | 275/520 [17:15<14:59, 3.67s/it] {'loss': 1.3211, 'grad_norm': 0.0004812614916028044, 'learning_rate': 0.00478190306317332, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:15<14:59, 3.67s/it] 53%|█████▎ | 276/520 [17:19<14:58, 3.68s/it] {'loss': 1.3842, 'grad_norm': 0.0004931020579588226, 'learning_rate': 0.0047507705716965136, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:19<14:58, 3.68s/it] 53%|█████▎ | 277/520 [17:23<14:53, 3.68s/it] {'loss': 1.3447, 'grad_norm': 0.00041572427509581594, 'learning_rate': 0.00471964776381404, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:23<14:53, 3.68s/it] 53%|█████▎ | 278/520 [17:26<14:57, 3.71s/it] {'loss': 1.2849, 'grad_norm': 0.00040859405474648867, 'learning_rate': 0.0046885358487757395, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:26<14:57, 3.71s/it] 54%|█████▎ | 279/520 [17:30<14:48, 3.69s/it] {'loss': 1.2506, 'grad_norm': 0.0005887226413856237, 'learning_rate': 0.004657436035408217, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:30<14:48, 3.69s/it] 54%|█████▍ | 280/520 [17:34<14:42, 3.68s/it] {'loss': 1.3285, 'grad_norm': 0.0005045561939297156, 'learning_rate': 0.0046263495320678786, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:34<14:42, 3.68s/it] 54%|█████▍ | 281/520 [17:37<14:37, 3.67s/it] {'loss': 1.4096, 'grad_norm': 0.0005392873197882779, 'learning_rate': 0.004595277546593984, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:37<14:37, 3.67s/it] 54%|█████▍ | 282/520 [17:41<14:35, 3.68s/it] {'loss': 1.2962, 'grad_norm': 0.00044627378524751926, 'learning_rate': 0.004564221286261709, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:41<14:35, 3.68s/it] 54%|█████▍ | 283/520 [17:45<14:29, 3.67s/it] {'loss': 1.4076, 'grad_norm': 0.00044135602517882095, 'learning_rate': 0.004533181957735248, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:45<14:29, 3.67s/it] 55%|█████▍ | 284/520 [17:48<14:29, 3.68s/it] {'loss': 1.249, 'grad_norm': 0.0004832792640804892, 'learning_rate': 0.004502160767020917, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:48<14:29, 3.68s/it] 55%|█████▍ | 285/520 [17:52<14:20, 3.66s/it] {'loss': 1.3195, 'grad_norm': 0.00042555599777257256, 'learning_rate': 0.004471158919420312, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:52<14:20, 3.66s/it] 55%|█████▌ | 286/520 [17:56<14:18, 3.67s/it] {'loss': 1.199, 'grad_norm': 0.0005133213363045368, 'learning_rate': 0.004440177619483461, 'epoch': 0.55} + 55%|█████▌ | 286/520 [17:56<14:18, 3.67s/it] 55%|█████▌ | 287/520 [17:59<14:13, 3.66s/it] {'loss': 1.425, 'grad_norm': 0.0005212710562995658, 'learning_rate': 0.004409218070962036, 'epoch': 0.55} + 55%|█████▌ | 287/520 [17:59<14:13, 3.66s/it] 55%|█████▌ | 288/520 [18:03<14:11, 3.67s/it] {'loss': 1.4473, 'grad_norm': 0.0004118023153795553, 'learning_rate': 0.004378281476762576, 'epoch': 0.55} + 
55%|█████▌ | 288/520 [18:03<14:11, 3.67s/it] 56%|█████▌ | 289/520 [18:07<14:09, 3.68s/it] {'loss': 1.3541, 'grad_norm': 0.00048713033967774986, 'learning_rate': 0.004347369038899743, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:07<14:09, 3.68s/it] 56%|█████▌ | 290/520 [18:10<14:04, 3.67s/it] {'loss': 1.2557, 'grad_norm': 0.00043551057484630945, 'learning_rate': 0.004316481958449634, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:10<14:04, 3.67s/it] 56%|█████▌ | 291/520 [18:14<14:06, 3.70s/it] {'loss': 1.285, 'grad_norm': 0.0005841998074087396, 'learning_rate': 0.004285621435503101, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:14<14:06, 3.70s/it] 56%|█████▌ | 292/520 [18:18<14:18, 3.77s/it] {'loss': 1.3624, 'grad_norm': 0.0005333424800038396, 'learning_rate': 0.004254788669119127, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:18<14:18, 3.77s/it] 56%|█████▋ | 293/520 [18:22<14:23, 3.81s/it] {'loss': 1.3096, 'grad_norm': 0.00049488107189268, 'learning_rate': 0.0042239848572782415, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:22<14:23, 3.81s/it] 57%|█████▋ | 294/520 [18:26<14:29, 3.85s/it] {'loss': 1.3327, 'grad_norm': 0.0004810955447411595, 'learning_rate': 0.0041932111968359664, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:26<14:29, 3.85s/it] 57%|█████▋ | 295/520 [18:30<14:28, 3.86s/it] {'loss': 1.2599, 'grad_norm': 0.0003870854367222117, 'learning_rate': 0.004162468883476319, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:30<14:28, 3.86s/it] 57%|█████▋ | 296/520 [18:34<14:24, 3.86s/it] {'loss': 1.2833, 'grad_norm': 0.0004762503000322757, 'learning_rate': 0.0041317591116653484, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:34<14:24, 3.86s/it] 57%|█████▋ | 297/520 [18:38<14:26, 3.88s/it] {'loss': 1.4201, 'grad_norm': 0.0005073459137161232, 'learning_rate': 0.004101083074604737, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:38<14:26, 3.88s/it] 57%|█████▋ | 298/520 [18:41<14:18, 3.87s/it] {'loss': 1.3663, 'grad_norm': 0.00045589186159964855, 'learning_rate': 0.004070441964185427, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:41<14:18, 3.87s/it] 57%|█████▊ | 299/520 [18:45<14:13, 3.86s/it] {'loss': 1.324, 'grad_norm': 0.0004395022924631471, 'learning_rate': 0.004039836970941319, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:45<14:13, 3.86s/it] 58%|█████▊ | 300/520 [18:49<14:04, 3.84s/it] {'loss': 1.4103, 'grad_norm': 0.0004935292789737909, 'learning_rate': 0.004009269284003013, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:49<14:04, 3.84s/it] 58%|█████▊ | 301/520 [18:53<13:49, 3.79s/it] {'loss': 1.4114, 'grad_norm': 0.00047154770303559296, 'learning_rate': 0.003978740091051599, 'epoch': 0.58} + 58%|█████▊ | 301/520 [18:53<13:49, 3.79s/it] 58%|█████▊ | 302/520 [18:57<13:42, 3.77s/it] {'loss': 1.3187, 'grad_norm': 0.0004835784813564035, 'learning_rate': 0.003948250578272522, 'epoch': 0.58} + 58%|█████▊ | 302/520 [18:57<13:42, 3.77s/it] 58%|█████▊ | 303/520 [19:00<13:29, 3.73s/it] {'loss': 1.3304, 'grad_norm': 0.0005280382163859069, 'learning_rate': 0.003917801930309486, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:00<13:29, 3.73s/it] 58%|█████▊ | 304/520 [19:04<13:24, 3.72s/it] {'loss': 1.2485, 'grad_norm': 0.0005102792049905018, 'learning_rate': 0.003887395330218428, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:04<13:24, 3.72s/it] 59%|█████▊ | 305/520 [19:07<13:15, 3.70s/it] {'loss': 1.4196, 'grad_norm': 0.0006208566872303704, 'learning_rate': 0.003857031959421553, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:08<13:15, 3.70s/it] 59%|█████▉ | 306/520 [19:11<13:09, 3.69s/it] {'loss': 1.372, 'grad_norm': 0.0005033061681350737, 'learning_rate': 
0.003826712997661425, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:11<13:09, 3.69s/it] 59%|█████▉ | 307/520 [19:15<13:05, 3.69s/it] {'loss': 1.3043, 'grad_norm': 0.0005712373450512581, 'learning_rate': 0.0037964396229551362, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:15<13:05, 3.69s/it] 59%|█████▉ | 308/520 [19:18<12:59, 3.68s/it] {'loss': 1.4273, 'grad_norm': 0.00045473548927134306, 'learning_rate': 0.0037662130115485317, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:19<12:59, 3.68s/it] 59%|█████▉ | 309/520 [19:23<13:47, 3.92s/it] {'loss': 1.3226, 'grad_norm': 0.00043244083831331604, 'learning_rate': 0.003736034337870512, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:23<13:47, 3.92s/it] 60%|█████▉ | 310/520 [19:27<13:32, 3.87s/it] {'loss': 1.2803, 'grad_norm': 0.00045600169365697836, 'learning_rate': 0.003705904774487396, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:27<13:32, 3.87s/it] 60%|█████▉ | 311/520 [19:30<13:17, 3.82s/it] {'loss': 1.3006, 'grad_norm': 0.0005074651415780384, 'learning_rate': 0.003675825492057364, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:30<13:17, 3.82s/it] 60%|██████ | 312/520 [19:35<13:35, 3.92s/it] {'loss': 1.2766, 'grad_norm': 0.00046402390100539934, 'learning_rate': 0.0036457976592849752, 'epoch': 0.6} + 60%|██████ | 312/520 [19:35<13:35, 3.92s/it] 60%|██████ | 313/520 [19:38<13:16, 3.85s/it] {'loss': 1.2329, 'grad_norm': 0.00041788629698734397, 'learning_rate': 0.0036158224428757537, 'epoch': 0.6} + 60%|██████ | 313/520 [19:38<13:16, 3.85s/it] 60%|██████ | 314/520 [19:42<13:23, 3.90s/it] {'loss': 1.3034, 'grad_norm': 0.0005216165021660137, 'learning_rate': 0.003585901007490863, 'epoch': 0.6} + 60%|██████ | 314/520 [19:42<13:23, 3.90s/it] 61%|██████ | 315/520 [19:46<13:05, 3.83s/it] {'loss': 1.3066, 'grad_norm': 0.0005609289580878025, 'learning_rate': 0.0035560345157018515, 'epoch': 0.61} + 61%|██████ | 315/520 [19:46<13:05, 3.83s/it] 61%|██████ | 316/520 [19:50<13:15, 3.90s/it] {'loss': 1.2903, 'grad_norm': 0.0005437872738415601, 'learning_rate': 0.0035262241279454785, 'epoch': 0.61} + 61%|██████ | 316/520 [19:50<13:15, 3.90s/it] 61%|██████ | 317/520 [19:54<13:02, 3.85s/it] {'loss': 1.2661, 'grad_norm': 0.0004832589196584574, 'learning_rate': 0.003496471002478635, 'epoch': 0.61} + 61%|██████ | 317/520 [19:54<13:02, 3.85s/it] 61%|██████ | 318/520 [19:58<12:51, 3.82s/it] {'loss': 1.3991, 'grad_norm': 0.0005960996238847348, 'learning_rate': 0.0034667762953333294, 'epoch': 0.61} + 61%|██████ | 318/520 [19:58<12:51, 3.82s/it] 61%|██████▏ | 319/520 [20:02<13:01, 3.89s/it] {'loss': 1.2605, 'grad_norm': 0.00040171820282230275, 'learning_rate': 0.0034371411602717784, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:02<13:01, 3.89s/it] 62%|██████▏ | 320/520 [20:05<12:46, 3.83s/it] {'loss': 1.1999, 'grad_norm': 0.0005683279057146555, 'learning_rate': 0.0034075667487415786, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:05<12:46, 3.83s/it] 62%|██████▏ | 321/520 [20:09<12:35, 3.79s/it] {'loss': 1.4177, 'grad_norm': 0.00045583012311207333, 'learning_rate': 0.0033780542098309652, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:09<12:35, 3.79s/it] 62%|██████▏ | 322/520 [20:13<12:30, 3.79s/it] {'loss': 1.1808, 'grad_norm': 0.0004904874175961909, 'learning_rate': 0.0033486046902241663, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:13<12:30, 3.79s/it] 62%|██████▏ | 323/520 [20:16<12:20, 3.76s/it] {'loss': 1.2762, 'grad_norm': 0.0005195199093667516, 'learning_rate': 0.003319219334156847, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:16<12:20, 3.76s/it] 62%|██████▏ | 324/520 [20:20<12:11, 3.73s/it] {'loss': 1.3572, 
'grad_norm': 0.0004574238019148704, 'learning_rate': 0.0032898992833716566, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:20<12:11, 3.73s/it] 62%|██████▎ | 325/520 [20:24<12:07, 3.73s/it] {'loss': 1.3446, 'grad_norm': 0.0006578004635043645, 'learning_rate': 0.0032606456770738635, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:24<12:07, 3.73s/it] 63%|██████▎ | 326/520 [20:28<12:07, 3.75s/it] {'loss': 1.3566, 'grad_norm': 0.0005356284825378573, 'learning_rate': 0.0032314596518870932, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:28<12:07, 3.75s/it] 63%|██████▎ | 327/520 [20:31<11:58, 3.72s/it] {'loss': 1.2979, 'grad_norm': 0.0004922672716225482, 'learning_rate': 0.0032023423418091625, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:31<11:58, 3.72s/it] 63%|██████▎ | 328/520 [20:35<11:54, 3.72s/it] {'loss': 1.3973, 'grad_norm': 0.0005190011903175821, 'learning_rate': 0.003173294878168025, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:35<11:54, 3.72s/it] 63%|██████▎ | 329/520 [20:39<11:52, 3.73s/it] {'loss': 1.2711, 'grad_norm': 0.0004886972243418778, 'learning_rate': 0.0031443183895778107, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:39<11:52, 3.73s/it] 63%|██████▎ | 330/520 [20:42<11:45, 3.71s/it] {'loss': 1.3722, 'grad_norm': 0.0004925068440564321, 'learning_rate': 0.003115414001894974, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:42<11:45, 3.71s/it] 64%|██████▎ | 331/520 [20:46<11:38, 3.70s/it] {'loss': 1.3078, 'grad_norm': 0.0004605075755041452, 'learning_rate': 0.0030865828381745515, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:46<11:38, 3.70s/it] 64%|██████▍ | 332/520 [20:50<11:35, 3.70s/it] {'loss': 1.3181, 'grad_norm': 0.00041731136632021105, 'learning_rate': 0.0030578260186265266, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:50<11:35, 3.70s/it] 64%|██████▍ | 333/520 [20:54<11:32, 3.71s/it] {'loss': 1.4319, 'grad_norm': 0.0005133573952227065, 'learning_rate': 0.003029144660572304, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:54<11:32, 3.71s/it] 64%|██████▍ | 334/520 [20:57<11:33, 3.73s/it] {'loss': 1.369, 'grad_norm': 0.0005476413972698262, 'learning_rate': 0.003000539878401296, 'epoch': 0.64} + 64%|██████▍ | 334/520 [20:57<11:33, 3.73s/it] 64%|██████▍ | 335/520 [21:01<11:24, 3.70s/it] {'loss': 1.3536, 'grad_norm': 0.0004852373037495533, 'learning_rate': 0.0029720127835276256, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:01<11:24, 3.70s/it] 65%|██████▍ | 336/520 [21:05<11:21, 3.71s/it] {'loss': 1.2767, 'grad_norm': 0.0005997397085053143, 'learning_rate': 0.0029435644843469433, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:05<11:21, 3.71s/it] 65%|██████▍ | 337/520 [21:09<11:27, 3.76s/it] {'loss': 1.2692, 'grad_norm': 0.0005284847673391215, 'learning_rate': 0.002915196086193361, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:09<11:27, 3.76s/it] 65%|██████▌ | 338/520 [21:12<11:28, 3.78s/it] {'loss': 1.3602, 'grad_norm': 0.0005868914516000067, 'learning_rate': 0.0028869086912965036, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:12<11:28, 3.78s/it] 65%|██████▌ | 339/520 [21:16<11:19, 3.76s/it] {'loss': 1.3117, 'grad_norm': 0.0005004961373016941, 'learning_rate': 0.0028587033987386855, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:16<11:19, 3.76s/it] 65%|██████▌ | 340/520 [21:20<11:15, 3.76s/it] {'loss': 1.2879, 'grad_norm': 0.0005376090925467147, 'learning_rate': 0.00283058130441221, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:20<11:15, 3.76s/it] 66%|██████▌ | 341/520 [21:24<11:10, 3.75s/it] {'loss': 1.333, 'grad_norm': 0.0005397634155270971, 'learning_rate': 0.0028025435009767746, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:24<11:10, 
3.75s/it] 66%|██████▌ | 342/520 [21:27<11:09, 3.76s/it] {'loss': 1.3317, 'grad_norm': 0.0005324956731008113, 'learning_rate': 0.002774591077817038, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:27<11:09, 3.76s/it] 66%|██████▌ | 343/520 [21:31<11:00, 3.73s/it] {'loss': 1.2309, 'grad_norm': 0.0004476287473913952, 'learning_rate': 0.002746725121000273, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:31<11:00, 3.73s/it] 66%|██████▌ | 344/520 [21:35<10:56, 3.73s/it] {'loss': 1.2679, 'grad_norm': 0.0005644789325818388, 'learning_rate': 0.002718946713234185, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:35<10:56, 3.73s/it] 66%|██████▋ | 345/520 [21:38<10:49, 3.71s/it] {'loss': 1.3871, 'grad_norm': 0.0005225263268530576, 'learning_rate': 0.0026912569338248316, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:38<10:49, 3.71s/it] 67%|██████▋ | 346/520 [21:42<10:44, 3.70s/it] {'loss': 1.2712, 'grad_norm': 0.0005442976705394248, 'learning_rate': 0.0026636568586346897, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:42<10:44, 3.70s/it] 67%|██████▋ | 347/520 [21:46<10:39, 3.70s/it] {'loss': 1.2974, 'grad_norm': 0.0004527416628490153, 'learning_rate': 0.002636147560040866, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:46<10:39, 3.70s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [21:49<10:34, 3.69s/it] {'loss': 1.2561, 'grad_norm': 0.0006421281350203214, 'learning_rate': 0.0026087301068934104, 'epoch': 0.67} + 67%|██████▋ | 348/520 [21:49<10:34, 3.69s/it] 67%|██████▋ | 349/520 [21:53<10:28, 3.67s/it] {'loss': 1.2911, 'grad_norm': 0.0005943242913177522, 'learning_rate': 0.002581405564473801, 'epoch': 0.67} + 67%|██████▋ | 349/520 [21:53<10:28, 3.67s/it] 67%|██████▋ | 350/520 [21:57<10:27, 3.69s/it] {'loss': 1.3321, 'grad_norm': 0.0005129366873927589, 'learning_rate': 0.0025541749944535553, 'epoch': 0.67} + 67%|██████▋ | 350/520 [21:57<10:27, 3.69s/it] 68%|██████▊ | 351/520 [22:00<10:22, 3.68s/it] {'loss': 1.2403, 'grad_norm': 0.00045762397458635627, 'learning_rate': 0.002527039454852963, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:00<10:22, 3.68s/it] 68%|██████▊ | 352/520 [22:04<10:18, 3.68s/it] {'loss': 1.3611, 'grad_norm': 0.0005009555317616712, 'learning_rate': 0.0025000000000000014, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:04<10:18, 3.68s/it] 68%|██████▊ | 353/520 [22:08<10:17, 3.70s/it] {'loss': 1.248, 'grad_norm': 0.0004028394808418516, 'learning_rate': 0.002473057680489348, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:08<10:17, 3.70s/it] 68%|██████▊ | 354/520 [22:12<10:11, 3.68s/it] {'loss': 1.3411, 'grad_norm': 0.00043754167567637683, 'learning_rate': 0.0024462135431415734, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:12<10:11, 3.68s/it] 68%|██████▊ | 355/520 [22:15<10:07, 3.68s/it] {'loss': 1.32, 'grad_norm': 0.00046495639280294077, 'learning_rate': 0.0024194686309624664, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:15<10:07, 3.68s/it] 68%|██████▊ | 356/520 [22:19<10:04, 3.68s/it] {'loss': 1.3304, 'grad_norm': 0.0005323577170625181, 'learning_rate': 0.00239282398310251, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:19<10:04, 3.68s/it] 69%|██████▊ | 357/520 [22:23<09:58, 3.67s/it] {'loss': 1.3522, 'grad_norm': 0.00045194372836357985, 'learning_rate': 0.002366280634816496, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:23<09:58, 3.67s/it] 69%|██████▉ | 358/520 [22:26<09:56, 3.68s/it] {'loss': 1.273, 'grad_norm': 0.0004841273276784089, 'learning_rate': 
0.0023398396174233176, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:26<09:56, 3.68s/it] 69%|██████▉ | 359/520 [22:30<09:52, 3.68s/it] {'loss': 1.2828, 'grad_norm': 0.0004921979155163131, 'learning_rate': 0.00231350195826588, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:30<09:52, 3.68s/it] 69%|██████▉ | 360/520 [22:34<09:50, 3.69s/it] {'loss': 1.289, 'grad_norm': 0.0004984620216914757, 'learning_rate': 0.0022872686806712033, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:34<09:50, 3.69s/it] 69%|██████▉ | 361/520 [22:37<09:47, 3.69s/it] {'loss': 1.3097, 'grad_norm': 0.0004487531749872309, 'learning_rate': 0.0022611408039106442, 'epoch': 0.69} + 69%|██████▉ | 361/520 [22:37<09:47, 3.69s/it] 70%|██████▉ | 362/520 [22:41<09:41, 3.68s/it] {'loss': 1.3134, 'grad_norm': 0.0005118819936369498, 'learning_rate': 0.002235119343160303, 'epoch': 0.7} + 70%|██████▉ | 362/520 [22:41<09:41, 3.68s/it] 70%|██████▉ | 363/520 [22:45<09:38, 3.68s/it] {'loss': 1.3485, 'grad_norm': 0.00047816037484374105, 'learning_rate': 0.002209205309461581, 'epoch': 0.7} + 70%|██████▉ | 363/520 [22:45<09:38, 3.68s/it] 70%|███████ | 364/520 [22:48<09:34, 3.68s/it] {'loss': 1.3151, 'grad_norm': 0.000511476816661959, 'learning_rate': 0.0021833997096818896, 'epoch': 0.7} + 70%|███████ | 364/520 [22:48<09:34, 3.68s/it] 70%|███████ | 365/520 [22:52<09:31, 3.68s/it] {'loss': 1.3894, 'grad_norm': 0.0004921716365261249, 'learning_rate': 0.002157703546475539, 'epoch': 0.7} + 70%|███████ | 365/520 [22:52<09:31, 3.68s/it] 70%|███████ | 366/520 [22:56<09:29, 3.70s/it] {'loss': 1.3809, 'grad_norm': 0.0004616091710467202, 'learning_rate': 0.0021321178182447708, 'epoch': 0.7} + 70%|███████ | 366/520 [22:56<09:29, 3.70s/it] 71%|███████ | 367/520 [22:59<09:25, 3.69s/it] {'loss': 1.3785, 'grad_norm': 0.0005660182407527328, 'learning_rate': 0.0021066435191009715, 'epoch': 0.71} + 71%|███████ | 367/520 [22:59<09:25, 3.69s/it] 71%|███████ | 368/520 [23:03<09:21, 3.69s/it] {'loss': 1.2119, 'grad_norm': 0.0005075733807540907, 'learning_rate': 0.002081281638826052, 'epoch': 0.71} + 71%|███████ | 368/520 [23:03<09:21, 3.69s/it] 71%|███████ | 369/520 [23:07<09:16, 3.69s/it] {'loss': 1.2786, 'grad_norm': 0.0004328092369254247, 'learning_rate': 0.002056033162833977, 'epoch': 0.71} + 71%|███████ | 369/520 [23:07<09:16, 3.69s/it] 71%|███████ | 370/520 [23:11<09:14, 3.70s/it] {'loss': 1.2778, 'grad_norm': 0.0005231757827740109, 'learning_rate': 0.0020308990721324928, 'epoch': 0.71} + 71%|███████ | 370/520 [23:11<09:14, 3.70s/it] 71%|███████▏ | 371/520 [23:14<09:07, 3.68s/it] {'loss': 1.2681, 'grad_norm': 0.0005642466328585192, 'learning_rate': 0.0020058803432849988, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:14<09:07, 3.68s/it] 72%|███████▏ | 372/520 [23:18<09:03, 3.67s/it] {'loss': 1.3273, 'grad_norm': 0.0004018461473549162, 'learning_rate': 0.001980977948372612, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:18<09:03, 3.67s/it] 72%|███████▏ | 373/520 [23:21<08:57, 3.66s/it] {'loss': 1.2379, 'grad_norm': 0.0005121655725869409, 'learning_rate': 0.0019561928549563967, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:21<08:57, 3.66s/it] 72%|███████▏ | 374/520 [23:25<08:56, 3.68s/it] {'loss': 1.36, 'grad_norm': 0.00048362277828056896, 'learning_rate': 0.0019315260260397637, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:25<08:56, 3.68s/it] 72%|███████▏ | 375/520 [23:29<08:53, 3.68s/it] {'loss': 1.2929, 'grad_norm': 0.0005191454555961827, 'learning_rate': 0.0019069784200310591, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:29<08:53, 3.68s/it] 72%|███████▏ | 376/520 [23:33<08:51, 
3.69s/it] {'loss': 1.387, 'grad_norm': 0.00046331933264219353, 'learning_rate': 0.0018825509907063327, 'epoch': 0.72} + 72%|███████▏ | 376/520 [23:33<08:51, 3.69s/it] 72%|███████▎ | 377/520 [23:36<08:49, 3.70s/it] {'loss': 1.2994, 'grad_norm': 0.0005810750323517536, 'learning_rate': 0.0018582446871722635, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:36<08:49, 3.70s/it] 73%|███████▎ | 378/520 [23:40<08:45, 3.70s/it] {'loss': 1.3787, 'grad_norm': 0.00046386341350388577, 'learning_rate': 0.0018340604538293016, 'epoch': 0.73} + 73%|███████▎ | 378/520 [23:40<08:45, 3.70s/it] 73%|███████▎ | 379/520 [23:44<08:42, 3.70s/it] {'loss': 1.3277, 'grad_norm': 0.00042365855751695067, 'learning_rate': 0.0018099992303349578, 'epoch': 0.73} + 73%|███████▎ | 379/520 [23:44<08:42, 3.70s/it] 73%|███████▎ | 380/520 [23:47<08:38, 3.70s/it] {'loss': 1.32, 'grad_norm': 0.0005369922284779795, 'learning_rate': 0.0017860619515673033, 'epoch': 0.73} + 73%|███████▎ | 380/520 [23:47<08:38, 3.70s/it] 73%|███████▎ | 381/520 [23:51<08:34, 3.70s/it] {'loss': 1.3549, 'grad_norm': 0.0005031400002820961, 'learning_rate': 0.0017622495475886485, 'epoch': 0.73} + 73%|███████▎ | 381/520 [23:51<08:34, 3.70s/it] 73%|███████▎ | 382/520 [23:55<08:32, 3.71s/it] {'loss': 1.2949, 'grad_norm': 0.00046316343945773235, 'learning_rate': 0.0017385629436093958, 'epoch': 0.73} + 73%|███████▎ | 382/520 [23:55<08:32, 3.71s/it] 74%|███████▎ | 383/520 [23:59<08:29, 3.72s/it] {'loss': 1.1956, 'grad_norm': 0.0006394183954583762, 'learning_rate': 0.0017150030599520983, 'epoch': 0.74} + 74%|███████▎ | 383/520 [23:59<08:29, 3.72s/it] 74%|███████▍ | 384/520 [24:02<08:25, 3.72s/it] {'loss': 1.2914, 'grad_norm': 0.0005996562080168356, 'learning_rate': 0.0016915708120157041, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:02<08:25, 3.72s/it] 74%|███████▍ | 385/520 [24:06<08:19, 3.70s/it] {'loss': 1.3415, 'grad_norm': 0.0004550500743562536, 'learning_rate': 0.0016682671102399805, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:06<08:19, 3.70s/it] 74%|███████▍ | 386/520 [24:10<08:23, 3.76s/it] {'loss': 1.2951, 'grad_norm': 0.00042483523067040875, 'learning_rate': 0.0016450928600701504, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:10<08:23, 3.76s/it] 74%|███████▍ | 387/520 [24:14<08:23, 3.79s/it] {'loss': 1.329, 'grad_norm': 0.0004833399436598073, 'learning_rate': 0.0016220489619216988, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:14<08:23, 3.79s/it] 75%|███████▍ | 388/520 [24:18<08:24, 3.82s/it] {'loss': 1.2572, 'grad_norm': 0.0005449222264887834, 'learning_rate': 0.0015991363111454021, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:18<08:24, 3.82s/it] 75%|███████▍ | 389/520 [24:21<08:20, 3.82s/it] {'loss': 1.3242, 'grad_norm': 0.000632816131159964, 'learning_rate': 0.0015763557979925325, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:21<08:20, 3.82s/it] 75%|███████▌ | 390/520 [24:25<08:15, 3.81s/it] {'loss': 1.3701, 'grad_norm': 0.000461121836824817, 'learning_rate': 0.0015537083075802649, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:25<08:15, 3.81s/it] 75%|███████▌ | 391/520 [24:29<08:08, 3.79s/it] {'loss': 1.4172, 'grad_norm': 0.0005118887214506462, 'learning_rate': 0.0015311947198572917, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:29<08:08, 3.79s/it] 75%|███████▌ | 392/520 [24:33<08:00, 3.76s/it] {'loss': 1.2714, 'grad_norm': 0.00044635546905375634, 'learning_rate': 0.0015088159095696363, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:33<08:00, 3.76s/it] 76%|███████▌ | 393/520 [24:36<07:56, 3.75s/it] {'loss': 1.2013, 'grad_norm': 0.0004270735612955424, 'learning_rate': 
0.0014865727462266543, 'epoch': 0.76} + 76%|███████▌ | 393/520 [24:36<07:56, 3.75s/it] 76%|███████▌ | 394/520 [24:40<07:53, 3.75s/it] {'loss': 1.3454, 'grad_norm': 0.0005033622866612395, 'learning_rate': 0.0014644660940672626, 'epoch': 0.76} + 76%|███████▌ | 394/520 [24:40<07:53, 3.75s/it] 76%|███████▌ | 395/520 [24:44<07:50, 3.76s/it] {'loss': 1.3081, 'grad_norm': 0.0005407064150577695, 'learning_rate': 0.0014424968120263504, 'epoch': 0.76} + 76%|███████▌ | 395/520 [24:44<07:50, 3.76s/it] 76%|███████▌ | 396/520 [24:48<07:43, 3.74s/it] {'loss': 1.3649, 'grad_norm': 0.0005235112264213269, 'learning_rate': 0.001420665753701408, 'epoch': 0.76} + 76%|███████▌ | 396/520 [24:48<07:43, 3.74s/it] 76%|███████▋ | 397/520 [24:51<07:38, 3.72s/it] {'loss': 1.3377, 'grad_norm': 0.00046831236135584143, 'learning_rate': 0.0013989737673193682, 'epoch': 0.76} + 76%|███████▋ | 397/520 [24:51<07:38, 3.72s/it] 77%|███████▋ | 398/520 [24:55<07:37, 3.75s/it] {'loss': 1.3531, 'grad_norm': 0.0005564022192471993, 'learning_rate': 0.0013774216957036368, 'epoch': 0.77} + 77%|███████▋ | 398/520 [24:55<07:37, 3.75s/it] 77%|███████▋ | 399/520 [24:59<07:32, 3.74s/it] {'loss': 1.2276, 'grad_norm': 0.00047850586593427307, 'learning_rate': 0.0013560103762413583, 'epoch': 0.77} + 77%|███████▋ | 399/520 [24:59<07:32, 3.74s/it] 77%|███████▋ | 400/520 [25:03<07:27, 3.73s/it] {'loss': 1.2755, 'grad_norm': 0.0004535870451657184, 'learning_rate': 0.0013347406408508694, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:03<07:27, 3.73s/it] 77%|███████▋ | 401/520 [25:06<07:22, 3.72s/it] {'loss': 1.1801, 'grad_norm': 0.000623139292063493, 'learning_rate': 0.00131361331594938, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:06<07:22, 3.72s/it] 77%|███████▋ | 402/520 [25:10<07:18, 3.72s/it] {'loss': 1.3114, 'grad_norm': 0.0005272925409961114, 'learning_rate': 0.0012926292224208662, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:10<07:18, 3.72s/it] 78%|███████▊ | 403/520 [25:14<07:15, 3.73s/it] {'loss': 1.3315, 'grad_norm': 0.0005681796474474386, 'learning_rate': 0.0012717891755841722, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:14<07:15, 3.73s/it] 78%|███████▊ | 404/520 [25:18<07:17, 3.77s/it] {'loss': 1.2644, 'grad_norm': 0.0006093289118964399, 'learning_rate': 0.0012510939851613284, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:18<07:17, 3.77s/it] 78%|███████▊ | 405/520 [25:21<07:15, 3.79s/it] {'loss': 1.2537, 'grad_norm': 0.0006326016496691407, 'learning_rate': 0.0012305444552461009, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:21<07:15, 3.79s/it] 78%|███████▊ | 406/520 [25:25<07:15, 3.82s/it] {'loss': 1.1997, 'grad_norm': 0.0006465849401522999, 'learning_rate': 0.0012101413842727344, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:25<07:15, 3.82s/it] 78%|███████▊ | 407/520 [25:29<07:13, 3.84s/it] {'loss': 1.3972, 'grad_norm': 0.0005410970545160357, 'learning_rate': 0.001189885564984946, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:29<07:13, 3.84s/it] 78%|███████▊ | 408/520 [25:33<07:09, 3.84s/it] {'loss': 1.3373, 'grad_norm': 0.0005794881789749252, 'learning_rate': 0.0011697777844051104, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:33<07:09, 3.84s/it] 79%|███████▊ | 409/520 [25:37<07:05, 3.84s/it] {'loss': 1.471, 'grad_norm': 0.0005840606641928336, 'learning_rate': 0.001149818823803686, 'epoch': 0.79} + 79%|███████▊ | 409/520 [25:37<07:05, 3.84s/it] 79%|███████▉ | 410/520 [25:41<07:03, 3.85s/it] {'loss': 1.1961, 'grad_norm': 0.0005713657125208064, 'learning_rate': 0.001130009458668863, 'epoch': 0.79} + 79%|███████▉ | 410/520 [25:41<07:03, 3.85s/it] 
79%|███████▉ | 411/520 [25:45<07:03, 3.88s/it] {'loss': 1.4271, 'grad_norm': 0.0005701952516066741, 'learning_rate': 0.0011103504586764262, 'epoch': 0.79} + 79%|███████▉ | 411/520 [25:45<07:03, 3.88s/it] 79%|███████▉ | 412/520 [25:49<07:05, 3.94s/it] {'loss': 1.3325, 'grad_norm': 0.0004928842339415369, 'learning_rate': 0.001090842587659851, 'epoch': 0.79} + 79%|███████▉ | 412/520 [25:49<07:05, 3.94s/it] 79%|███████▉ | 413/520 [25:53<07:07, 3.99s/it] {'loss': 1.2812, 'grad_norm': 0.0006105425413934667, 'learning_rate': 0.0010714866035806325, 'epoch': 0.79} + 79%|███████▉ | 413/520 [25:53<07:07, 3.99s/it] 80%|███████▉ | 414/520 [25:57<07:07, 4.03s/it] {'loss': 1.0806, 'grad_norm': 0.0005197755188922386, 'learning_rate': 0.0010522832584988235, 'epoch': 0.8} + 80%|███████▉ | 414/520 [25:57<07:07, 4.03s/it] 80%|███████▉ | 415/520 [26:01<07:05, 4.06s/it] {'loss': 1.3171, 'grad_norm': 0.0005485659633014944, 'learning_rate': 0.0010332332985438248, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:01<07:05, 4.06s/it] 80%|████████ | 416/520 [26:05<07:04, 4.08s/it] {'loss': 1.2404, 'grad_norm': 0.0005931181438748611, 'learning_rate': 0.0010143374638853892, 'epoch': 0.8} + 80%|████████ | 416/520 [26:05<07:04, 4.08s/it] 80%|████████ | 417/520 [26:09<06:55, 4.04s/it] {'loss': 1.377, 'grad_norm': 0.0005116158899542035, 'learning_rate': 0.0009955964887048607, 'epoch': 0.8} + 80%|████████ | 417/520 [26:09<06:55, 4.04s/it] 80%|████████ | 418/520 [26:13<06:41, 3.93s/it] {'loss': 1.3753, 'grad_norm': 0.0005256480864034743, 'learning_rate': 0.0009770111011666582, 'epoch': 0.8} + 80%|████████ | 418/520 [26:13<06:41, 3.93s/it] 81%|████████ | 419/520 [26:17<06:31, 3.87s/it] {'loss': 1.3901, 'grad_norm': 0.0005502397654903829, 'learning_rate': 0.0009585820233899739, 'epoch': 0.81} + 81%|████████ | 419/520 [26:17<06:31, 3.87s/it] 81%|████████ | 420/520 [26:20<06:21, 3.81s/it] {'loss': 1.267, 'grad_norm': 0.0006377502155361911, 'learning_rate': 0.0009403099714207175, 'epoch': 0.81} + 81%|████████ | 420/520 [26:20<06:21, 3.81s/it] 81%|████████ | 421/520 [26:24<06:13, 3.78s/it] {'loss': 1.2083, 'grad_norm': 0.0006576069452333285, 'learning_rate': 0.0009221956552036992, 'epoch': 0.81} + 81%|████████ | 421/520 [26:24<06:13, 3.78s/it] 81%|████████ | 422/520 [26:28<06:06, 3.74s/it] {'loss': 1.328, 'grad_norm': 0.0005898851574709862, 'learning_rate': 0.0009042397785550405, 'epoch': 0.81} + 81%|████████ | 422/520 [26:28<06:06, 3.74s/it] 81%|████████▏ | 423/520 [26:31<06:01, 3.73s/it] {'loss': 1.3132, 'grad_norm': 0.000619369284890836, 'learning_rate': 0.0008864430391348333, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:31<06:01, 3.73s/it] 82%|████████▏ | 424/520 [26:35<05:56, 3.71s/it] {'loss': 1.3365, 'grad_norm': 0.00044296124654548, 'learning_rate': 0.0008688061284200266, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:35<05:56, 3.71s/it] 82%|████████▏ | 425/520 [26:39<05:52, 3.71s/it] {'loss': 1.2845, 'grad_norm': 0.0005502063311215155, 'learning_rate': 0.0008513297316775626, 'epoch': 0.82} + 82%|████████▏ | 425/520 [26:39<05:52, 3.71s/it] 82%|████████▏ | 426/520 [26:42<05:46, 3.69s/it] {'loss': 1.3646, 'grad_norm': 0.0007380372197707274, 'learning_rate': 0.0008340145279377559, 'epoch': 0.82} + 82%|████████▏ | 426/520 [26:42<05:46, 3.69s/it] 82%|████████▏ | 427/520 [26:46<05:43, 3.69s/it] {'loss': 1.221, 'grad_norm': 0.0005300644172767394, 'learning_rate': 0.0008168611899679012, 'epoch': 0.82} + 82%|████████▏ | 427/520 [26:46<05:43, 3.69s/it] 82%|████████▏ | 428/520 [26:50<05:43, 3.73s/it] {'loss': 1.2393, 'grad_norm': 
0.0005135767432218133, 'learning_rate': 0.000799870384246143, 'epoch': 0.82} + 82%|████████▏ | 428/520 [26:50<05:43, 3.73s/it] 82%|████████▎ | 429/520 [26:54<05:44, 3.78s/it] {'loss': 1.3551, 'grad_norm': 0.0005448544865402374, 'learning_rate': 0.0007830427709355725, 'epoch': 0.82} + 82%|████████▎ | 429/520 [26:54<05:44, 3.78s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [26:57<05:39, 3.77s/it] {'loss': 1.3378, 'grad_norm': 0.00047451580420767995, 'learning_rate': 0.0007663790038585794, 'epoch': 0.83} + 83%|████████▎ | 430/520 [26:57<05:39, 3.77s/it] 83%|████████▎ | 431/520 [27:01<05:33, 3.74s/it] {'loss': 1.2367, 'grad_norm': 0.0005945095415210776, 'learning_rate': 0.0007498797304714544, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:01<05:33, 3.74s/it] 83%|████████▎ | 432/520 [27:05<05:28, 3.73s/it] {'loss': 1.2536, 'grad_norm': 0.0005137672448103679, 'learning_rate': 0.000733545591839222, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:05<05:28, 3.73s/it] 83%|████████▎ | 433/520 [27:09<05:23, 3.71s/it] {'loss': 1.3715, 'grad_norm': 0.0005367372568801427, 'learning_rate': 0.0007173772226107434, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:09<05:23, 3.71s/it] 83%|████████▎ | 434/520 [27:12<05:18, 3.71s/it] {'loss': 1.1469, 'grad_norm': 0.0005233634793681417, 'learning_rate': 0.0007013752509940485, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:12<05:18, 3.71s/it] 84%|████████▎ | 435/520 [27:16<05:13, 3.69s/it] {'loss': 1.4244, 'grad_norm': 0.0005548305183117678, 'learning_rate': 0.0006855402987319348, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:16<05:13, 3.69s/it] 84%|████████▍ | 436/520 [27:20<05:09, 3.69s/it] {'loss': 1.2196, 'grad_norm': 0.000587320707989483, 'learning_rate': 0.0006698729810778065, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:20<05:09, 3.69s/it] 84%|████████▍ | 437/520 [27:23<05:05, 3.68s/it] {'loss': 1.4283, 'grad_norm': 0.0004925174297299641, 'learning_rate': 0.000654373906771768, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:23<05:05, 3.68s/it] 84%|████████▍ | 438/520 [27:27<05:01, 3.68s/it] {'loss': 1.2501, 'grad_norm': 0.0005544546456715912, 'learning_rate': 0.0006390436780169733, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:27<05:01, 3.68s/it] 84%|████████▍ | 439/520 [27:31<04:57, 3.67s/it] {'loss': 1.2329, 'grad_norm': 0.0004072198674791794, 'learning_rate': 0.0006238828904562316, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:31<04:57, 3.67s/it] 85%|████████▍ | 440/520 [27:34<04:53, 3.66s/it] {'loss': 1.2897, 'grad_norm': 0.0005271802360692747, 'learning_rate': 0.0006088921331488567, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:34<04:53, 3.66s/it] 85%|████████▍ | 441/520 [27:38<04:50, 3.68s/it] {'loss': 1.2353, 'grad_norm': 0.000503627958586733, 'learning_rate': 0.000594071988547788, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:38<04:50, 3.68s/it] 85%|████████▌ | 442/520 [27:42<04:47, 3.68s/it] {'loss': 1.3551, 'grad_norm': 0.0005659213265276439, 'learning_rate': 0.0005794230324769517, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:42<04:47, 3.68s/it] 85%|████████▌ | 443/520 [27:45<04:44, 3.69s/it] {'loss': 1.3572, 'grad_norm': 0.0004961784959968634, 'learning_rate': 0.0005649458341088914, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:45<04:44, 3.69s/it] 85%|████████▌ | 444/520 [27:49<04:41, 3.70s/it] {'loss': 1.3036, 'grad_norm': 0.00048436663589111783, 'learning_rate': 0.0005506409559426573, 'epoch': 0.85} 
+ 85%|████████▌ | 444/520 [27:49<04:41, 3.70s/it] 86%|████████▌ | 445/520 [27:53<04:36, 3.68s/it] {'loss': 1.2264, 'grad_norm': 0.0005369709477567917, 'learning_rate': 0.0005365089537819434, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:53<04:36, 3.68s/it] 86%|████████▌ | 446/520 [27:56<04:32, 3.69s/it] {'loss': 1.3225, 'grad_norm': 0.00043546786223472083, 'learning_rate': 0.0005225503767134953, 'epoch': 0.86} + 86%|████████▌ | 446/520 [27:56<04:32, 3.69s/it] 86%|████████▌ | 447/520 [28:00<04:28, 3.68s/it] {'loss': 1.3123, 'grad_norm': 0.0004906412125981902, 'learning_rate': 0.0005087657670857798, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:00<04:28, 3.68s/it] 86%|████████▌ | 448/520 [28:04<04:25, 3.68s/it] {'loss': 1.3061, 'grad_norm': 0.0005370774933519082, 'learning_rate': 0.0004951556604879049, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:04<04:25, 3.68s/it] 86%|████████▋ | 449/520 [28:07<04:21, 3.69s/it] {'loss': 1.2898, 'grad_norm': 0.0005137019866998957, 'learning_rate': 0.0004817205857288176, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:07<04:21, 3.69s/it] 87%|████████▋ | 450/520 [28:11<04:17, 3.68s/it] {'loss': 1.342, 'grad_norm': 0.0005790325673750725, 'learning_rate': 0.0004684610648167503, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:11<04:17, 3.68s/it] 87%|████████▋ | 451/520 [28:15<04:14, 3.69s/it] {'loss': 1.3357, 'grad_norm': 0.0005378874719862742, 'learning_rate': 0.0004553776129389453, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:15<04:14, 3.69s/it] 87%|████████▋ | 452/520 [28:19<04:11, 3.69s/it] {'loss': 1.3185, 'grad_norm': 0.00048718454165327653, 'learning_rate': 0.0004424707384416343, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:19<04:11, 3.69s/it] 87%|████████▋ | 453/520 [28:22<04:06, 3.68s/it] {'loss': 1.3198, 'grad_norm': 0.0005158882502065457, 'learning_rate': 0.000429740942810285, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:22<04:06, 3.68s/it] 87%|████████▋ | 454/520 [28:26<04:03, 3.69s/it] {'loss': 1.2383, 'grad_norm': 0.00047882219206703166, 'learning_rate': 0.00041718872065011904, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:26<04:03, 3.69s/it] 88%|████████▊ | 455/520 [28:30<03:59, 3.69s/it] {'loss': 1.3738, 'grad_norm': 0.0005175389167227269, 'learning_rate': 0.00040481455966689673, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:30<03:59, 3.69s/it] 88%|████████▊ | 456/520 [28:33<03:54, 3.67s/it] {'loss': 1.3361, 'grad_norm': 0.0005775237688730736, 'learning_rate': 0.00039261894064796134, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:33<03:54, 3.67s/it] 88%|████████▊ | 457/520 [28:37<03:52, 3.70s/it] {'loss': 1.1511, 'grad_norm': 0.0005040240847439299, 'learning_rate': 0.0003806023374435663, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:37<03:52, 3.70s/it] 88%|████████▊ | 458/520 [28:41<03:49, 3.70s/it] {'loss': 1.4505, 'grad_norm': 0.0005096663554044903, 'learning_rate': 0.00036876521694845677, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:41<03:49, 3.70s/it] 88%|████████▊ | 459/520 [28:44<03:46, 3.71s/it] {'loss': 1.3629, 'grad_norm': 0.000551184205307139, 'learning_rate': 0.00035710803908373226, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:44<03:46, 3.71s/it] 88%|████████▊ | 460/520 [28:48<03:42, 3.71s/it] {'loss': 1.2673, 'grad_norm': 0.0005778576074347149, 'learning_rate': 0.0003456312567789793, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:48<03:42, 3.71s/it] 89%|████████▊ | 461/520 [28:52<03:38, 3.71s/it] {'loss': 1.2288, 'grad_norm': 0.0004227680413766238, 'learning_rate': 0.0003343353159546675, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:52<03:38, 
3.71s/it] 89%|████████▉ | 462/520 [28:55<03:34, 3.70s/it] {'loss': 1.3817, 'grad_norm': 0.0004698741718454418, 'learning_rate': 0.00032322065550483003, 'epoch': 0.89} + 89%|████████▉ | 462/520 [28:55<03:34, 3.70s/it] 89%|████████▉ | 463/520 [28:59<03:30, 3.70s/it] {'loss': 1.2588, 'grad_norm': 0.0005997948658276505, 'learning_rate': 0.00031228770728000454, 'epoch': 0.89} + 89%|████████▉ | 463/520 [28:59<03:30, 3.70s/it] 89%|████████▉ | 464/520 [29:03<03:26, 3.68s/it] {'loss': 1.3513, 'grad_norm': 0.0005401351912414892, 'learning_rate': 0.00030153689607045843, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:03<03:26, 3.68s/it] 89%|████████▉ | 465/520 [29:07<03:23, 3.69s/it] {'loss': 1.4609, 'grad_norm': 0.0005295418662533015, 'learning_rate': 0.00029096863958968266, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:07<03:23, 3.69s/it] 90%|████████▉ | 466/520 [29:10<03:19, 3.69s/it] {'loss': 1.3574, 'grad_norm': 0.00047255853121821756, 'learning_rate': 0.0002805833484581621, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:10<03:19, 3.69s/it] 90%|████████▉ | 467/520 [29:14<03:16, 3.70s/it] {'loss': 1.2412, 'grad_norm': 0.0004878224290520071, 'learning_rate': 0.0002703814261874199, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:14<03:16, 3.70s/it] 90%|█████████ | 468/520 [29:18<03:11, 3.69s/it] {'loss': 1.3089, 'grad_norm': 0.0006000891827464224, 'learning_rate': 0.0002603632691643415, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:18<03:11, 3.69s/it] 90%|█████████ | 469/520 [29:21<03:08, 3.69s/it] {'loss': 1.375, 'grad_norm': 0.0006040773223330581, 'learning_rate': 0.00025052926663577004, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:21<03:08, 3.69s/it] 90%|█████████ | 470/520 [29:25<03:05, 3.71s/it] {'loss': 1.255, 'grad_norm': 0.0006117334728930308, 'learning_rate': 0.00024087980069338822, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:25<03:05, 3.71s/it] 91%|█████████ | 471/520 [29:29<03:01, 3.70s/it] {'loss': 1.3061, 'grad_norm': 0.0005878741160265762, 'learning_rate': 0.0002314152462588659, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:29<03:01, 3.70s/it] 91%|█████████ | 472/520 [29:32<02:57, 3.69s/it] {'loss': 1.2837, 'grad_norm': 0.0005634223406789187, 'learning_rate': 0.00022213597106929606, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:32<02:57, 3.69s/it] 91%|█████████ | 473/520 [29:36<02:53, 3.70s/it] {'loss': 1.3438, 'grad_norm': 0.0005525820480555378, 'learning_rate': 0.00021304233566290964, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:36<02:53, 3.70s/it] 91%|█████████ | 474/520 [29:40<02:50, 3.70s/it] {'loss': 1.2835, 'grad_norm': 0.0004519407865195526, 'learning_rate': 0.0002041346933650612, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:40<02:50, 3.70s/it] 91%|█████████▏| 475/520 [29:44<02:46, 3.70s/it] {'loss': 1.1965, 'grad_norm': 0.0004920790231886722, 'learning_rate': 0.00019541339027450257, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:44<02:46, 3.70s/it] 92%|█████████▏| 476/520 [29:47<02:42, 3.69s/it] {'loss': 1.314, 'grad_norm': 0.000507226162060866, 'learning_rate': 0.00018687876524993985, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:47<02:42, 3.69s/it] 92%|█████████▏| 477/520 [29:51<02:38, 3.69s/it] {'loss': 1.3234, 'grad_norm': 0.0006101705564109783, 'learning_rate': 0.0001785311498968617, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:51<02:38, 3.69s/it] 92%|█████████▏| 478/520 [29:55<02:35, 3.70s/it] {'loss': 1.2578, 'grad_norm': 0.0004784352852444732, 'learning_rate': 0.000170370868554659, 'epoch': 0.92} + 92%|█████████▏| 478/520 [29:55<02:35, 3.70s/it] 92%|█████████▏| 479/520 [29:58<02:31, 
3.69s/it] {'loss': 1.2681, 'grad_norm': 0.0005243636963332597, 'learning_rate': 0.00016239823828401945, 'epoch': 0.92} + 92%|█████████▏| 479/520 [29:58<02:31, 3.69s/it] 92%|█████████▏| 480/520 [30:02<02:27, 3.69s/it] {'loss': 1.2704, 'grad_norm': 0.0005221062288168034, 'learning_rate': 0.00015461356885461075, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:02<02:27, 3.69s/it] 92%|█████████▎| 481/520 [30:06<02:24, 3.71s/it] {'loss': 1.2694, 'grad_norm': 0.000495681570199952, 'learning_rate': 0.0001470171627330452, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:06<02:24, 3.71s/it] 93%|█████████▎| 482/520 [30:09<02:20, 3.70s/it] {'loss': 1.2883, 'grad_norm': 0.000494190015325122, 'learning_rate': 0.0001396093150711275, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:09<02:20, 3.70s/it] 93%|█████████▎| 483/520 [30:13<02:16, 3.70s/it] {'loss': 1.3214, 'grad_norm': 0.0006556132466368655, 'learning_rate': 0.00013239031369438327, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:13<02:16, 3.70s/it] 93%|█████████▎| 484/520 [30:17<02:13, 3.71s/it] {'loss': 1.3304, 'grad_norm': 0.00048369166727317477, 'learning_rate': 0.0001253604390908819, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:17<02:13, 3.71s/it] 93%|█████████▎| 485/520 [30:21<02:09, 3.71s/it] {'loss': 1.2706, 'grad_norm': 0.0004909925733274851, 'learning_rate': 0.00011851996440033319, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:21<02:09, 3.71s/it] 93%|█████████▎| 486/520 [30:24<02:06, 3.71s/it] {'loss': 1.4072, 'grad_norm': 0.0005475893659347341, 'learning_rate': 0.00011186915540347731, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:24<02:06, 3.71s/it] 94%|█████████▎| 487/520 [30:28<02:02, 3.71s/it] {'loss': 1.2689, 'grad_norm': 0.0004981852401279703, 'learning_rate': 0.00010540827051175817, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:28<02:02, 3.71s/it] 94%|█████████▍| 488/520 [30:32<01:58, 3.69s/it] {'loss': 1.2243, 'grad_norm': 0.0005392810154212827, 'learning_rate': 9.913756075728087e-05, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:32<01:58, 3.69s/it] 94%|█████████▍| 489/520 [30:35<01:54, 3.69s/it] {'loss': 1.2794, 'grad_norm': 0.0004397520437267786, 'learning_rate': 9.305726978306172e-05, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:35<01:54, 3.69s/it] 94%|█████████▍| 490/520 [30:39<01:50, 3.68s/it] {'loss': 1.3298, 'grad_norm': 0.0006111633168108029, 'learning_rate': 8.716763383355864e-05, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:39<01:50, 3.68s/it] 94%|█████████▍| 491/520 [30:43<01:46, 3.68s/it] {'loss': 1.3086, 'grad_norm': 0.000605164649196958, 'learning_rate': 8.146888174549338e-05, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:43<01:46, 3.68s/it] 95%|█████████▍| 492/520 [30:46<01:42, 3.68s/it] {'loss': 1.4033, 'grad_norm': 0.000515805993106248, 'learning_rate': 7.59612349389599e-05, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:46<01:42, 3.68s/it] 95%|█████████▍| 493/520 [30:50<01:39, 3.68s/it] {'loss': 1.2812, 'grad_norm': 0.0004967742693896666, 'learning_rate': 7.064490740882056e-05, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:50<01:39, 3.68s/it] 95%|█████████▌| 494/520 [30:54<01:35, 3.69s/it] {'loss': 1.3147, 'grad_norm': 0.00046586762137538974, 'learning_rate': 6.552010571639455e-05, 'epoch': 0.95} + 95%|█████████▌| 494/520 [30:54<01:35, 3.69s/it] 95%|█████████▌| 495/520 [30:57<01:32, 3.69s/it] {'loss': 1.3298, 'grad_norm': 0.0005349346624874598, 'learning_rate': 6.058702898142643e-05, 'epoch': 0.95} + 95%|█████████▌| 495/520 [30:57<01:32, 3.69s/it] 95%|█████████▌| 496/520 [31:01<01:28, 3.69s/it] {'loss': 1.2663, 'grad_norm': 
0.0005637725947876187, 'learning_rate': 5.584586887435739e-05, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:01<01:28, 3.69s/it] 96%|█████████▌| 497/520 [31:05<01:24, 3.70s/it] {'loss': 1.2243, 'grad_norm': 0.0004560594536110935, 'learning_rate': 5.129680960887006e-05, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:05<01:24, 3.70s/it] 96%|█████████▌| 498/520 [31:08<01:21, 3.69s/it] {'loss': 1.3117, 'grad_norm': 0.0006421642995052578, 'learning_rate': 4.694002793473595e-05, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:08<01:21, 3.69s/it] 96%|█████████▌| 499/520 [31:12<01:17, 3.69s/it] {'loss': 1.3807, 'grad_norm': 0.0004646748124843404, 'learning_rate': 4.277569313094809e-05, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:12<01:17, 3.69s/it] 96%|█████████▌| 500/520 [31:16<01:13, 3.69s/it] {'loss': 1.451, 'grad_norm': 0.0006090001203520792, 'learning_rate': 3.8803966999139685e-05, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:16<01:13, 3.69s/it] 96%|█████████▋| 501/520 [31:20<01:10, 3.71s/it] {'loss': 1.2777, 'grad_norm': 0.0005610572338855423, 'learning_rate': 3.502500385730189e-05, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:20<01:10, 3.71s/it] 97%|█████████▋| 502/520 [31:23<01:06, 3.72s/it] {'loss': 1.3569, 'grad_norm': 0.0005145363592622278, 'learning_rate': 3.143895053378698e-05, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:23<01:06, 3.72s/it] 97%|█████████▋| 503/520 [31:27<01:03, 3.71s/it] {'loss': 1.2435, 'grad_norm': 0.0005187509165044863, 'learning_rate': 2.8045946361601182e-05, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:27<01:03, 3.71s/it] 97%|█████████▋| 504/520 [31:31<00:59, 3.69s/it] {'loss': 1.3368, 'grad_norm': 0.0007076181389695235, 'learning_rate': 2.4846123172992953e-05, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:31<00:59, 3.69s/it] 97%|█████████▋| 505/520 [31:34<00:55, 3.69s/it] {'loss': 1.3595, 'grad_norm': 0.0006081605166457785, 'learning_rate': 2.1839605294330932e-05, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:34<00:55, 3.69s/it] 97%|█████████▋| 506/520 [31:38<00:51, 3.69s/it] {'loss': 1.2909, 'grad_norm': 0.0006054003327279466, 'learning_rate': 1.9026509541272274e-05, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:38<00:51, 3.69s/it] 98%|█████████▊| 507/520 [31:42<00:47, 3.68s/it] {'loss': 1.3873, 'grad_norm': 0.00045046854011922743, 'learning_rate': 1.640694521422459e-05, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:42<00:47, 3.68s/it] 98%|█████████▊| 508/520 [31:45<00:44, 3.68s/it] {'loss': 1.4252, 'grad_norm': 0.00048653729508901685, 'learning_rate': 1.3981014094099354e-05, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:45<00:44, 3.68s/it] 98%|█████████▊| 509/520 [31:49<00:40, 3.69s/it] {'loss': 1.3889, 'grad_norm': 0.0005187369171793799, 'learning_rate': 1.1748810438355628e-05, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:49<00:40, 3.69s/it] 98%|█████████▊| 510/520 [31:53<00:36, 3.69s/it] {'loss': 1.3282, 'grad_norm': 0.0005817896842734919, 'learning_rate': 9.710420977340761e-06, 'epoch': 0.98} + 98%|█████████▊| 510/520 [31:53<00:36, 3.69s/it] 98%|█████████▊| 511/520 [31:57<00:33, 3.70s/it] {'loss': 1.2771, 'grad_norm': 0.0005708517199888487, 'learning_rate': 7.865924910916978e-06, 'epoch': 0.98} + 98%|█████████▊| 511/520 [31:57<00:33, 3.70s/it] 98%|█████████▊| 512/520 [32:00<00:29, 3.70s/it] {'loss': 1.1777, 'grad_norm': 0.0005098334734410014, 'learning_rate': 6.215393905388278e-06, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:00<00:29, 3.70s/it] 99%|█████████▊| 513/520 [32:04<00:25, 3.71s/it] {'loss': 1.396, 'grad_norm': 0.0005881459845143773, 'learning_rate': 
4.758892090711009e-06, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:04<00:25, 3.71s/it] 99%|█████████▉| 514/520 [32:08<00:22, 3.71s/it] {'loss': 1.3447, 'grad_norm': 0.0005021586191190668, 'learning_rate': 3.496476058006959e-06, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:08<00:22, 3.71s/it] 99%|█████████▉| 515/520 [32:11<00:18, 3.70s/it] {'loss': 1.4303, 'grad_norm': 0.0006018822751545247, 'learning_rate': 2.4281948573617874e-06, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:11<00:18, 3.70s/it] 99%|█████████▉| 516/520 [32:15<00:14, 3.71s/it] {'loss': 1.3407, 'grad_norm': 0.00060722813414372, 'learning_rate': 1.5540899959187727e-06, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:15<00:14, 3.71s/it] 99%|█████████▉| 517/520 [32:19<00:11, 3.71s/it] {'loss': 1.2801, 'grad_norm': 0.0004512329000509055, 'learning_rate': 8.741954362678772e-07, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:19<00:11, 3.71s/it] 100%|█████████▉| 518/520 [32:22<00:07, 3.68s/it] {'loss': 1.3172, 'grad_norm': 0.0007351016203619977, 'learning_rate': 3.885375951256931e-07, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:22<00:07, 3.68s/it] 100%|█████████▉| 519/520 [32:26<00:03, 3.71s/it] {'loss': 1.2582, 'grad_norm': 0.00047441838492849386, 'learning_rate': 9.713534230904042e-08, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:26<00:03, 3.71s/it] 100%|██████████| 520/520 [32:31<00:00, 3.98s/it] {'loss': 1.2472, 'grad_norm': 0.0006568464329300767, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:31<00:00, 3.98s/it] {'train_runtime': 1951.3256, 'train_samples_per_second': 34.094, 'train_steps_per_second': 0.266, 'train_loss': 1.3718746139453009, 'epoch': 1.0} + 100%|██████████| 520/520 [32:31<00:00, 3.98s/it] 100%|██████████| 520/520 [32:31<00:00, 3.75s/it] +[2025-10-09 11:30:56,648] [INFO] [launch.py:348:main] Process 1178059 exits successfully. +[2025-10-09 11:30:57,650] [INFO] [launch.py:348:main] Process 1178060 exits successfully. +[2025-10-09 11:30:57,650] [INFO] [launch.py:348:main] Process 1178057 exits successfully. +[2025-10-09 11:30:57,650] [INFO] [launch.py:348:main] Process 1178058 exits successfully. +[2025-10-09 11:30:57,651] [INFO] [launch.py:348:main] Process 1178061 exits successfully. +[2025-10-09 11:30:57,651] [INFO] [launch.py:348:main] Process 1178056 exits successfully. +[2025-10-09 11:30:57,651] [INFO] [launch.py:348:main] Process 1178055 exits successfully. +[2025-10-09 11:31:01,656] [INFO] [launch.py:348:main] Process 1178054 exits successfully. 
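The closing summary is internally consistent with the parallel/batching setup shown in the launch commands (8 local ranks, --per_device_train_batch_size 4, --gradient_accumulation_steps 4, --train_data_ratio 0.1 of the 665k mix). A quick back-of-the-envelope check:

    runtime = 1951.3256          # 'train_runtime' (seconds) from the summary above
    steps = 520                  # optimizer steps in this run
    eff_batch = 8 * 4 * 4        # ranks x per-device batch x grad-accum = 128
    print(steps / runtime)               # ~0.266 -> 'train_steps_per_second'
    print(steps * eff_batch / runtime)   # ~34.1  -> 'train_samples_per_second'
    # 520 steps x 128 samples ~= 66,560 ~= 0.1 x 665k, as expected from
    # --train_data_ratio 0.1; the last, partially filled batch accounts for
    # the small gap to the logged 34.094.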
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_1e-2_connector-3.0_0.5_1e-2_ablation_20251009_105701.log +Timestamp: 2025-10-09 11:31:04 +===================================== diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251009_051327.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251009_051327.log new file mode 100644 index 0000000000000000000000000000000000000000..edc0b1b0bd664afb05c0b046d53c121c2f161c20 --- /dev/null +++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251009_051327.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251009_051327.log +Timestamp: 2025-10-09 05:13:27 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-09 05:13:29,795] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:32,545] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 05:13:32,546] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 3 --weight_decay 0. 
--warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 3 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-09 05:13:35,181] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:36,211] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 05:13:36,211] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 05:13:36,212] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 05:13:36,212] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 05:13:36,212] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 05:13:36,212] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 05:13:36,212] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 05:13:36,214] [INFO] [launch.py:253:main] process 809225 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03',
'--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:13:36,216] [INFO] [launch.py:253:main] process 809226 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', 
'--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:13:36,218] [INFO] [launch.py:253:main] process 809227 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:13:36,220] [INFO] [launch.py:253:main] process 809228 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 
'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:13:36,222] [INFO] [launch.py:253:main] process 809229 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 
'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:13:36,224] [INFO] [launch.py:253:main] process 809230 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', 
'--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:13:36,226] [INFO] [launch.py:253:main] process 809231 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:13:36,228] [INFO] [launch.py:253:main] process 809232 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', 
'--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-09 05:13:42,839] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:43,097] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:43,183] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:43,202] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:43,205] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:43,205] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:43,215] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:43,226] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:13:43,312] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 05:13:43,509] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 05:13:43,595] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 05:13:43,614] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 05:13:43,614] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 05:13:43,619] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 05:13:43,619] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-09 05:13:43,620] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 05:13:43,630] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:809225:809225 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809225:809225 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809225:809225 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:809225:809225 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:809225:809225 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:809225:809225 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:809226:809226 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:809226:809226 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809226:809226 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809226:809226 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:809226:809226 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:809226:809226 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Using network Socket +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
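The TinyLlavaConfig dump above records the masking hyperparameters these ablations sweep (mask_type "soft", temperature 0.5, init_mean 3.0, masks applied to the llm and connector modules). The log does not show the masking code itself, so the following is only a minimal sketch of one common way a soft, temperature-scaled weight mask is realized; the class name and all details are assumptions, not the repo's API:

    import torch

    class SoftMaskedLinear(torch.nn.Module):
        # Hypothetical illustration: one learnable score per weight,
        # squashed by a temperature-scaled sigmoid into a soft mask.
        def __init__(self, linear, init_mean=3.0, temperature=0.5):
            super().__init__()
            self.linear = linear
            self.temperature = temperature
            # scores start at init_mean (3.0 in this sweep)
            self.score = torch.nn.Parameter(torch.full_like(linear.weight, init_mean))

        def forward(self, x):
            # sigmoid(score / T) lies in (0, 1); at init sigmoid(3.0 / 0.5) ~= 0.9975,
            # so training starts close to the unmasked network
            mask = torch.sigmoid(self.score / self.temperature)
            return torch.nn.functional.linear(x, self.linear.weight * mask, self.linear.bias)

    lin = SoftMaskedLinear(torch.nn.Linear(8, 8))
    y = lin(torch.randn(2, 8))  # initially ~equal to the unmasked layer's output

Under this reading, training can push individual scores down to suppress weights (or up to keep them) while gradients still flow through the sigmoid, which is consistent with mask_type "soft" and backward_type "normal" in the config.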
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:809228:809228 [3] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:809228:809228 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:809228:809228 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:809228:809228 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:809228:809228 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:809228:809228 [3] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:809229:809229 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:809229:809229 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809229:809229 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809229:809229 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:809229:809229 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:809229:809229 [4] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:809231:809231 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:809231:809231 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809231:809231 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809231:809231 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:809231:809231 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:809231:809231 [6] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:809230:809230 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:809230:809230 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809230:809230 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809230:809230 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:809230:809230 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:809230:809230 [5] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:809232:809232 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:809232:809232 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809232:809232 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809232:809232 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:809232:809232 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:809232:809232 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:809227:809227 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:809227:809227 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809227:809227 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809227:809227 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:809227:809227 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:809227:809227 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO NET/IB : No device found. 
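The per-rank blocks above (cudaDriverVersion, Bootstrap, NET/Plugin, NET/IB) are NCCL's communicator bootstrap: each of the 8 local ranks joins one communicator before the first collective runs. In this stack that happens inside the DeepSpeed launcher, but the same log shape can be reproduced with a minimal torch.distributed program. A sketch, assuming a single-node 8-GPU launch via torchrun rather than the exact `deepspeed.launcher.launch` invocation used here:

```python
import os
import torch
import torch.distributed as dist

# Minimal NCCL bootstrap: each rank joins one communicator, which is the
# step that emits the "NCCL INFO Bootstrap / NET/Plugin / NET/IB" lines.
# Launch with: torchrun --nproc_per_node=8 this_script.py
dist.init_process_group(backend="nccl")
local_rank = int(os.environ["LOCAL_RANK"])  # set by torchrun
torch.cuda.set_device(local_rank)

# The first collective triggers channel/ring/tree setup
# ("ncclCommInitRank ... Init START" in the log above).
x = torch.ones(1, device="cuda")
dist.all_reduce(x)  # defaults to SUM across all 8 ranks
print(f"rank {dist.get_rank()}: sum across ranks = {x.item()}")
dist.destroy_process_group()
```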
+ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO ncclCommInitRank comm 0x563ce4c1e200 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x7dd7533fa9770f7 - Init START +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO ncclCommInitRank comm 0x55aac20d7ac0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x7dd7533fa9770f7 - Init START +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO ncclCommInitRank comm 0x557f0786f950 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x7dd7533fa9770f7 - Init START +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO ncclCommInitRank comm 0x5582d444b460 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x7dd7533fa9770f7 - Init START +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO ncclCommInitRank comm 0x5583eaafae90 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x7dd7533fa9770f7 - Init START +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO ncclCommInitRank comm 0x55a08e3556f0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x7dd7533fa9770f7 - Init START +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO ncclCommInitRank comm 0x55febc425fa0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x7dd7533fa9770f7 - Init START +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO ncclCommInitRank comm 0x562890a8b740 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x7dd7533fa9770f7 - Init START +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO NVLS multicast support is not 
available on dev 6 +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO comm 0x55aac20d7ac0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO comm 0x563ce4c1e200 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO comm 0x55a08e3556f0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO comm 0x562890a8b740 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO comm 0x55febc425fa0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO comm 0x5582d444b460 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO comm 0x5583eaafae90 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO comm 0x557f0786f950 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 
2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 03/0 : 
6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via 
P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
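The long runs of "Channel NN/0 : a[a] -> b[b] via P2P/CUMEM/read" lines above enumerate, channel by channel, the peer-to-peer links of the 8-GPU ring (0 -> 1 -> ... -> 7 -> 0 and the reverse direction for the trees); they are highly regular and easy to collapse when skimming a log like this one. A small summarizer sketch (a hypothetical helper for reading these logs, not part of the training code):

```python
import re
from collections import Counter

# Count P2P connection lines per (src, dst) GPU pair in an NCCL log,
# collapsing the per-channel repetition above into one row per link.
PAT = re.compile(r"Channel \d+/\d+ : (\d)\[\d\] -> (\d)\[\d\] via (\S+)")

def summarize(log_path: str) -> None:
    links = Counter()
    with open(log_path) as f:
        for line in f:
            m = PAT.search(line)
            if m:
                src, dst, transport = m.groups()
                links[(src, dst, transport)] += 1
    for (src, dst, transport), n in sorted(links.items()):
        print(f"GPU {src} -> GPU {dst} via {transport}: {n} channels")

# e.g. summarize("train.log") would print one line per directed GPU pair,
# each with 24 channels, matching the "24 coll channels" totals below.
```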
+ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:809230:810850 [5] 
NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:809230:810850 [5] NCCL INFO ncclCommInitRank comm 0x5582d444b460 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x7dd7533fa9770f7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:809229:810848 [4] NCCL INFO ncclCommInitRank comm 0x5583eaafae90 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x7dd7533fa9770f7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:809232:810851 [7] NCCL INFO ncclCommInitRank comm 0x55aac20d7ac0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x7dd7533fa9770f7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:809225:810829 [0] NCCL INFO ncclCommInitRank comm 0x55febc425fa0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x7dd7533fa9770f7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:809228:810847 [3] NCCL INFO ncclCommInitRank comm 0x557f0786f950 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x7dd7533fa9770f7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:809226:810830 [1] NCCL INFO ncclCommInitRank comm 0x562890a8b740 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x7dd7533fa9770f7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:809227:810852 [2] NCCL INFO ncclCommInitRank comm 0x55a08e3556f0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x7dd7533fa9770f7 - Init COMPLETE +ywang29-vrdb-test1-worker-0:809231:810849 [6] NCCL INFO ncclCommInitRank comm 0x563ce4c1e200 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x7dd7533fa9770f7 - Init COMPLETE +[2025-10-09 05:14:28,244] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 
'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 
'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 
'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 
'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 
'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 
'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 
'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 
'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 
'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: [same list of per-layer scores parameters as above, printed by another rank; duplicate omitted] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model [identical line printed 6 times, once per rank] +[2025-10-09 05:15:34,181] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower [identical line printed 8 times, once per rank] +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... [identical line printed 8 times, once per rank]
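The pretrained checkpoint referenced here is stored as separate components (language_model/, vision_tower/, and connector/pytorch_model.bin), and every rank loads each component itself, which is why the loading messages repeat (condensed above). As an illustration only, component-wise loading of such a layout might look like the sketch below; the directory names are taken from the log, but the classes and loading logic are assumptions, not the repository's actual code:

```python
import os
import torch
from transformers import AutoModelForCausalLM, SiglipVisionModel

def load_pretrained_components(pretrained_path, connector):
    """Illustrative loader for a TinyLLaVA-style split checkpoint.

    `connector` is any nn.Module whose state dict matches the saved
    connector weights (e.g. the 2-layer MLP shown in the dump below).
    """
    # Language model and vision tower are saved as full HF checkpoints.
    language_model = AutoModelForCausalLM.from_pretrained(
        os.path.join(pretrained_path, "language_model"))
    vision_tower = SiglipVisionModel.from_pretrained(
        os.path.join(pretrained_path, "vision_tower"))
    # The connector is a plain state dict serialized with torch.save.
    state = torch.load(
        os.path.join(pretrained_path, "connector", "pytorch_model.bin"),
        map_location="cpu")
    # strict=False tolerates the newly added `scores` parameters, which
    # are absent from the pretrain-stage file (see the warning above).
    connector.load_state_dict(state, strict=False)
    return language_model, vision_tower, connector
```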
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training 
init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init 
connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) [identical line printed by each of the 8 ranks; 7 duplicates omitted] +2025-10-09 05:15:47,597 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-09 05:15:47,601 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4
5 6 7 +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809227:815906 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809227:815906 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809230:815902 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:809230:815902 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809228:815901 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809228:815901 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:815906 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:815901 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:815906 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:815901 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:815906 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:815901 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809227:815906 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809228:815901 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+[... ~350 NCCL INFO lines elided: ranks 0-7 open channels 00/0 through 23/0 to their ring neighbors in both directions via P2P/CUMEM/read, each rank reports "Connected all rings" and "Connected all trees", and each logs threadThresholds 8/8/64 | 64/8/64 | 512 | 512 with 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer ...]
+ywang29-vrdb-test1-worker-0:809229:815900 [4] NCCL INFO ncclCommInitRank comm 0x7facec06aa50 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x5a372b9aa778ad13 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:809231:815904 [6] NCCL INFO ncclCommInitRank comm 0x7f8a7806b340 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x5a372b9aa778ad13 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:809227:815906 [2] NCCL INFO ncclCommInitRank comm 0x7f773006a7c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x5a372b9aa778ad13 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:809225:815899 [0] NCCL INFO ncclCommInitRank comm 0x7f5b7806af30 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x5a372b9aa778ad13 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:809232:815905 [7] NCCL INFO ncclCommInitRank comm 0x7f7aec06a4d0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x5a372b9aa778ad13 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:809228:815901 [3] NCCL INFO ncclCommInitRank comm 0x7fe4f006ac20 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x5a372b9aa778ad13 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:809230:815902 [5] NCCL INFO ncclCommInitRank comm 0x7fe21006afa0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x5a372b9aa778ad13 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:809226:815903 [1] NCCL INFO ncclCommInitRank comm 0x7f066806a9a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x5a372b9aa778ad13 - Init COMPLETE
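The channel-setup chatter condensed above appears because the job runs NCCL at INFO verbosity. If future runs in this Space do not need it, the level can be lowered through NCCL's standard environment variables before DeepSpeed initializes its communicators; a minimal sketch (the placement in the launcher is an assumption, not part of the original scripts):

```python
import os

# Lower NCCL's log level before torch.distributed / DeepSpeed create the
# communicator; setting these after init has no effect.
os.environ["NCCL_DEBUG"] = "WARN"  # one of VERSION, WARN, INFO, TRACE
# If INFO must stay on, restrict which subsystems log instead, e.g.:
# os.environ["NCCL_DEBUG_SUBSYS"] = "INIT,ENV"  # drops the per-channel P2P/COLL lines
```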
+  1/520 [00:12<1:47:16, 12.40s/it] {'loss': 2.0453, 'grad_norm': 0.004834393672971837, 'learning_rate': 0.1875, 'epoch': 0.0}
+  2/520 [00:16<1:03:36, 7.37s/it] {'loss': 2.0549, 'grad_norm': 0.005249546008494349, 'learning_rate': 0.375, 'epoch': 0.0}
+  3/520 [00:20<49:37, 5.76s/it] {'loss': 1.6738, 'grad_norm': 0.0017294766665775535, 'learning_rate': 0.5625, 'epoch': 0.01}
+  4/520 [00:23<43:06, 5.01s/it] {'loss': 1.5353, 'grad_norm': 0.0007857864802527009, 'learning_rate': 0.75, 'epoch': 0.01}
+  5/520 [00:27<39:26, 4.60s/it] {'loss': 1.5589, 'grad_norm': 0.001284703395763677, 'learning_rate': 0.9375, 'epoch': 0.01}
+  6/520 [00:31<37:09, 4.34s/it] {'loss': 1.4939, 'grad_norm': 0.004369105926166378, 'learning_rate': 1.125, 'epoch': 0.01}
+  7/520 [00:35<35:50, 4.19s/it] {'loss': 2.2622, 'grad_norm': 0.029510179502317303, 'learning_rate': 1.3125, 'epoch': 0.01}
+  8/520 [00:39<36:06, 4.23s/it] {'loss': 5.682, 'grad_norm': 0.17209048985171962, 'learning_rate': 1.5, 'epoch': 0.02}
+  9/520 [00:44<36:04, 4.24s/it] {'loss': 10.6036, 'grad_norm': 0.5530252736531144, 'learning_rate': 1.6875, 'epoch': 0.02}
+ 10/520 [00:47<34:29, 4.06s/it] {'loss': 6.081, 'grad_norm': 0.24088009056748141, 'learning_rate': 1.875, 'epoch': 0.02}
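The learning_rate column in the surrounding step lines rises in increments of 0.1875 up to 3.0 at step 16 (ceil of warmup_ratio 0.03 × 520 steps) and then decays along a cosine, the shape of HF Transformers' get_cosine_schedule_with_warmup. A minimal sketch that reproduces the logged values; the peak of 3.0 is read off the log itself (the command line passes 2e-1, so whatever upstream scaling produces 3.0 is treated here as given):

```python
import math

def lr_at(step: int, peak: float = 3.0, warmup: int = 16, total: int = 520) -> float:
    """Linear warmup then cosine decay, as implied by the logged values."""
    if step < warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))

assert abs(lr_at(1) - 0.1875) < 1e-12              # step 1 above
assert abs(lr_at(16) - 3.0) < 1e-12                # end of warmup, step 16 below
assert abs(lr_at(17) - 2.9999708593973073) < 1e-9  # first cosine step, step 17 below
```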
+ 11/520 [00:51<33:44, 3.98s/it] {'loss': 17.1101, 'grad_norm': 0.5617449996222591, 'learning_rate': 2.0625, 'epoch': 0.02}
+ 12/520 [00:55<32:53, 3.89s/it] {'loss': 19.837, 'grad_norm': 0.49998124115339393, 'learning_rate': 2.25, 'epoch': 0.02}
+[2025-10-09 05:16:51,790] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 13/520 [00:59<34:02, 4.03s/it] {'loss': 17.9524, 'grad_norm': 0.10063299977632767, 'learning_rate': 2.4375, 'epoch': 0.03}
+ 14/520 [01:03<33:29, 3.97s/it] {'loss': 15.4749, 'grad_norm': 0.04192689703591883, 'learning_rate': 2.625, 'epoch': 0.03}
+ 15/520 [01:07<32:57, 3.92s/it] {'loss': 14.1822, 'grad_norm': 0.03249710236812597, 'learning_rate': 2.8125, 'epoch': 0.03}
+ 16/520 [01:10<32:23, 3.86s/it] {'loss': 12.3726, 'grad_norm': 0.029460183171290746, 'learning_rate': 3.0, 'epoch': 0.03}
+ 17/520 [01:14<32:07, 3.83s/it] {'loss': 18.037, 'grad_norm': 0.19462142804907787, 'learning_rate': 2.9999708593973073, 'epoch': 0.03}
+ 18/520 [01:18<31:52, 3.81s/it] {'loss': 11.9128, 'grad_norm': 0.02412605024930445, 'learning_rate': 2.9998834387214623, 'epoch': 0.03}
+ 19/520 [01:22<31:39, 3.79s/it] {'loss': 12.5975, 'grad_norm': 0.015961749590656216, 'learning_rate': 2.9997377413691195, 'epoch': 0.04}
+ 20/520 [01:25<31:26, 3.77s/it] {'loss': 12.4419, 'grad_norm': 0.014771045731120423, 'learning_rate': 2.9995337730012244, 'epoch': 0.04}
+ 21/520 [01:29<31:29, 3.79s/it] {'loss': 12.5525, 'grad_norm': 0.011703936716110044, 'learning_rate': 2.9992715415427913, 'epoch': 0.04}
+ 22/520 [01:33<31:26, 3.79s/it] {'loss': 11.87, 'grad_norm': 0.012709489033465948, 'learning_rate': 2.998951057182598, 'epoch': 0.04}
+ 23/520 [01:37<31:06, 3.76s/it] {'loss': 11.5473, 'grad_norm': 0.008228101002636677, 'learning_rate': 2.998572332372787, 'epoch': 0.04}
+ 24/520 [01:40<30:49, 3.73s/it] {'loss': 11.6007, 'grad_norm': 0.0024821588105374184, 'learning_rate': 2.9981353818283836, 'epoch': 0.05}
+ 25/520 [01:44<30:49, 3.74s/it] {'loss': 11.506, 'grad_norm': 0.001401783701924813, 'learning_rate': 2.9976402225267247, 'epoch': 0.05}
+ 26/520 [01:48<30:38, 3.72s/it] {'loss': 11.1805, 'grad_norm': 0.001387646881728322, 'learning_rate': 2.997086873706798, 'epoch': 0.05}
+ 27/520 [01:52<30:30, 3.71s/it] {'loss': 11.0557, 'grad_norm': 0.001338149187980454, 'learning_rate': 2.996475356868493, 'epoch': 0.05}
+ 28/520 [01:55<30:19, 3.70s/it] {'loss': 11.061, 'grad_norm': 0.0010995501681302028, 'learning_rate': 2.99580569577177, 'epoch': 0.05}
+ 29/520 [01:59<30:14, 3.70s/it] {'loss': 10.8793, 'grad_norm': 0.0009292754028298887, 'learning_rate': 2.995077916435733, 'epoch': 0.06}
+ 30/520 [02:03<30:10, 3.70s/it] {'loss': 11.0745, 'grad_norm': 0.0007251417489872715, 'learning_rate': 2.9942920471376184, 'epoch': 0.06}
+ 31/520 [02:06<30:15, 3.71s/it] {'loss': 10.2989, 'grad_norm': 0.0007810863335181412, 'learning_rate': 2.9934481184117008, 'epoch': 0.06}
+ 32/520 [02:10<30:23, 3.74s/it] {'loss': 11.6737, 'grad_norm': 0.00041206429600400306, 'learning_rate': 2.992546163048102, 'epoch': 0.06}
+ 33/520 [02:14<30:16, 3.73s/it] {'loss': 10.8977, 'grad_norm': 0.0005449345786702799, 'learning_rate': 2.9915862160915196, 'epoch': 0.06}
+ 34/520 [02:18<30:06, 3.72s/it] {'loss': 10.758, 'grad_norm': 0.0005466005954635838, 'learning_rate': 2.990568314839864, 'epoch': 0.07}
+ 35/520 [02:21<30:00, 3.71s/it] {'loss': 10.9213, 'grad_norm': 0.0005241653039584968, 'learning_rate': 2.989492498842809, 'epoch': 0.07}
+ 36/520 [02:25<30:10, 3.74s/it] {'loss': 10.8403, 'grad_norm': 0.0005058843523005429, 'learning_rate': 2.9883588099002583, 'epoch': 0.07}
+ 37/520 [02:29<30:28, 3.79s/it] {'loss': 10.8934, 'grad_norm': 0.0005103614201214244, 'learning_rate': 2.9871672920607155, 'epoch': 0.07}
+ 38/520 [02:33<30:41, 3.82s/it] {'loss': 10.6527, 'grad_norm': 0.0006579034253715654, 'learning_rate': 2.985917991619579, 'epoch': 0.07}
+ 39/520 [02:37<30:52, 3.85s/it] {'loss': 10.5463, 'grad_norm': 0.0009699563269067671, 'learning_rate': 2.984610957117339, 'epoch': 0.07}
+ 40/520 [02:41<30:52, 3.86s/it] {'loss': 10.2573, 'grad_norm': 0.001108492907394352, 'learning_rate': 2.9832462393376926, 'epoch': 0.08}
+ 41/520 [02:45<30:53, 3.87s/it] {'loss': 10.6405, 'grad_norm': 0.0005355860012398107, 'learning_rate': 2.981823891305572, 'epoch': 0.08}
+ 42/520 [02:48<30:49, 3.87s/it] {'loss': 11.0611, 'grad_norm': 0.000373172701603451, 'learning_rate': 2.980343968285082, 'epoch': 0.08}
+ 43/520 [02:52<30:51, 3.88s/it] {'loss': 11.1378, 'grad_norm': 0.0002912843363559723, 'learning_rate': 2.978806527777354, 'epoch': 0.08}
+ 44/520 [02:56<30:48, 3.88s/it] {'loss': 11.0019, 'grad_norm': 0.0003047466083411127, 'learning_rate': 2.977211629518312, 'epoch': 0.08}
+ 45/520 [03:00<30:41, 3.88s/it] {'loss': 10.4177, 'grad_norm': 0.00041548934061464614, 'learning_rate': 2.975559335476352, 'epoch': 0.09}
+ 46/520 [03:04<31:13, 3.95s/it] {'loss': 11.17, 'grad_norm': 0.00022956547845395415, 'learning_rate': 2.9738497098499326, 'epoch': 0.09}
+ 47/520 [03:08<30:53, 3.92s/it] {'loss': 10.4895, 'grad_norm': 0.0003516766705403624, 'learning_rate': 2.972082819065082, 'epoch': 0.09}
+ 48/520 [03:12<30:36, 3.89s/it] {'loss': 10.5271, 'grad_norm': 0.00041434603605696596, 'learning_rate': 2.970258731772816, 'epoch': 0.09}
+ 49/520 [03:16<30:13, 3.85s/it] {'loss': 10.2968, 'grad_norm': 0.0004421366302291926, 'learning_rate': 2.9683775188464727, 'epoch': 0.09}
+ 50/520 [03:19<29:39, 3.79s/it] {'loss': 10.2934, 'grad_norm': 0.0005105501603181442, 'learning_rate': 2.966439253378957, 'epoch': 0.1}
+ 51/520 [03:23<29:21, 3.76s/it] {'loss': 10.3413, 'grad_norm': 0.0005940153102796736, 'learning_rate': 2.9644440106799, 'epoch': 0.1}
+ 52/520 [03:27<29:08, 3.74s/it] {'loss': 10.6534, 'grad_norm': 0.0005460124263902887, 'learning_rate': 2.9623918682727353, 'epoch': 0.1}
+ 53/520 [03:30<28:51, 3.71s/it] {'loss': 10.2339, 'grad_norm': 0.0007769907114063532, 'learning_rate': 2.9602829058916846, 'epoch': 0.1}
+ 54/520 [03:34<28:42, 3.70s/it] {'loss': 9.9487, 'grad_norm': 0.0012438254137640279, 'learning_rate': 2.9581172054786617, 'epoch': 0.1}
+ 55/520 [03:38<28:35, 3.69s/it] {'loss': 10.5257, 'grad_norm': 0.0011341552022873429, 'learning_rate': 2.955894851180086, 'epoch': 0.11}
+ 56/520 [03:41<28:24, 3.67s/it] {'loss': 10.4756, 'grad_norm': 0.0014842983585974091, 'learning_rate': 2.953615929343617, 'epoch': 0.11}
+ 57/520 [03:45<28:22, 3.68s/it] {'loss': 10.1681, 'grad_norm': 0.0026015999522184235, 'learning_rate': 2.9512805285147943, 'epoch': 0.11}
+ 58/520 [03:49<28:20, 3.68s/it] {'loss': 10.0155, 'grad_norm': 0.0041848929812956905, 'learning_rate': 2.9488887394336025, 'epoch': 0.11}
+ 59/520 [03:52<28:13, 3.67s/it] {'loss': 10.4118, 'grad_norm': 0.0036045227856821467, 'learning_rate': 2.9464406550309414, 'epoch': 0.11}
+ 60/520 [03:56<28:12, 3.68s/it] {'loss': 10.2786, 'grad_norm': 0.002842837627199614, 'learning_rate': 2.9439363704250177, 'epoch': 0.12}
+ 61/520 [04:00<28:04, 3.67s/it] {'loss': 11.1188, 'grad_norm': 0.00032284010455579156, 'learning_rate': 2.9413759829176493, 'epoch': 0.12}
+ 62/520 [04:03<28:04, 3.68s/it] {'loss': 10.4757, 'grad_norm': 0.0003619199874360127, 'learning_rate': 2.9387595919904816, 'epoch': 0.12}
+ 63/520 [04:07<27:53, 3.66s/it] {'loss': 10.0462, 'grad_norm': 0.00023219443502623638, 'learning_rate': 2.936087299301127, 'epoch': 0.12}
+ 64/520 [04:11<27:55, 3.67s/it] {'loss': 10.0227, 'grad_norm': 0.00017812826450639704, 'learning_rate': 2.933359208679211, 'epoch': 0.12}
+ 65/520 [04:14<27:58, 3.69s/it] {'loss': 10.6594, 'grad_norm': 0.0001654135792314007, 'learning_rate': 2.9305754261223402, 'epoch': 0.12}
+ 66/520 [04:18<28:17, 3.74s/it] {'loss': 10.2548, 'grad_norm': 0.00015762788123433445, 'learning_rate': 2.9277360597919837, 'epoch': 0.13}
+ 67/520 [04:22<28:17, 3.75s/it] {'loss': 10.2539, 'grad_norm': 0.00017163486772511948, 'learning_rate': 2.924841220009269, 'epoch': 0.13}
+ 68/520 [04:26<28:09, 3.74s/it] {'loss': 9.814, 'grad_norm': 0.00016109137625885034, 'learning_rate': 2.9218910192506975, 'epoch': 0.13}
+ 69/520 [04:29<27:54, 3.71s/it] {'loss': 10.1308, 'grad_norm': 0.00016087070348808717, 'learning_rate': 2.9188855721437736, 'epoch': 0.13}
+ 70/520 [04:33<27:50, 3.71s/it] {'loss': 10.1323, 'grad_norm': 0.00014540035441116693, 'learning_rate': 2.9158249954625513, 'epoch': 0.13}
+ 71/520 [04:37<27:47, 3.71s/it] {'loss': 10.0123, 'grad_norm': 0.00014892883676256743, 'learning_rate': 2.9127094081230953, 'epoch': 0.14}
+ 72/520 [04:41<27:59, 3.75s/it] {'loss': 10.2091, 'grad_norm': 0.00013912932922031376, 'learning_rate': 2.9095389311788624, 'epoch': 0.14}
+ 73/520 [04:44<28:09, 3.78s/it] {'loss': 9.8989, 'grad_norm': 0.0001562049369268183, 'learning_rate': 2.9063136878159987, 'epoch': 0.14}
+ 74/520 [04:48<27:51, 3.75s/it] {'loss': 10.355, 'grad_norm': 0.0001460894183223977, 'learning_rate': 2.903033803348551, 'epoch': 0.14}
+ 75/520 [04:52<27:50, 3.75s/it] {'loss': 9.523, 'grad_norm': 0.00016859494873934248, 'learning_rate': 2.8996994052135996, 'epoch': 0.14}
+ 76/520 [04:56<27:49, 3.76s/it] {'loss': 10.4917, 'grad_norm': 0.00011721714694584885, 'learning_rate': 2.8963106229663063, 'epoch': 0.15}
+ 77/520 [04:59<27:30, 3.73s/it] {'loss': 10.4198, 'grad_norm': 0.00015953465587608575, 'learning_rate': 2.89286758827488, 'epoch': 0.15}
+ 78/520 [05:03<27:19, 3.71s/it] {'loss': 9.7465, 'grad_norm': 0.00014954873543392987, 'learning_rate': 2.889370434915463, 'epoch': 0.15}
+ 79/520 [05:07<27:16, 3.71s/it] {'loss': 9.9888, 'grad_norm': 0.00013009849508757018, 'learning_rate': 2.88581929876693, 'epoch': 0.15}
+ 80/520 [05:11<27:23, 3.73s/it] {'loss': 11.0794, 'grad_norm': 0.00014193438947194179, 'learning_rate': 2.8822143178056114, 'epoch': 0.15}
+ 81/520 [05:14<27:18, 3.73s/it] {'loss': 10.8856, 'grad_norm': 0.00015162430254938908, 'learning_rate': 2.878555632099931, 'epoch': 0.16}
+ 82/520 [05:18<27:15, 3.73s/it] {'loss': 10.1428, 'grad_norm': 0.00013323487550074846, 'learning_rate': 2.874843383804964, 'epoch': 0.16}
+ 83/520 [05:22<27:08, 3.73s/it] {'loss': 10.4369, 'grad_norm': 0.0001303051159682715, 'learning_rate': 2.871077717156915, 'epoch': 0.16}
+ 84/520 [05:25<27:03, 3.72s/it] {'loss': 10.1732, 'grad_norm': 0.00011786170909740189, 'learning_rate': 2.8672587784675097, 'epoch': 0.16}
+ 85/520 [05:29<27:01, 3.73s/it] {'loss': 9.9315, 'grad_norm': 0.0001230532838891392, 'learning_rate': 2.8633867161183164, 'epoch': 0.16}
+ 86/520 [05:33<27:12, 3.76s/it] {'loss': 10.4494, 'grad_norm': 0.00011690075647077894, 'learning_rate': 2.859461680554975, 'epoch': 0.17}
+ 87/520 [05:37<27:09, 3.76s/it] {'loss': 11.1017, 'grad_norm': 0.00012618568215098803, 'learning_rate': 2.855483824281355, 'epoch': 0.17}
+ 88/520 [05:41<27:18, 3.79s/it] {'loss': 10.9768, 'grad_norm': 0.0001122953763158253, 'learning_rate': 2.8514533018536286, 'epoch': 0.17}
+ 89/520 [05:44<27:06, 3.77s/it] {'loss': 10.1406, 'grad_norm': 0.0001294999945838342, 'learning_rate': 2.8473702698742662, 'epoch': 0.17}
+ 90/520 [05:48<27:07, 3.78s/it] {'loss': 10.1379, 'grad_norm': 0.00014227286345126544, 'learning_rate': 2.843234886985951, 'epoch': 0.17}
+ 91/520 [05:52<27:01, 3.78s/it] {'loss': 9.9872, 'grad_norm': 0.00013773918736464008, 'learning_rate': 2.839047313865417, 'epoch': 0.17}
+ 92/520 [05:56<27:11, 3.81s/it] {'loss': 9.8319, 'grad_norm': 0.00018119110837570897, 'learning_rate': 2.834807713217203, 'epoch': 0.18}
+ 93/520 [06:00<27:10, 3.82s/it] {'loss': 10.3185, 'grad_norm': 0.00018244993667671788, 'learning_rate': 2.8305162497673324, 'epoch': 0.18}
+ 94/520 [06:04<27:16, 3.84s/it] {'loss': 10.3522, 'grad_norm': 0.000184186915445296, 'learning_rate': 2.8261730902569147, 'epoch': 0.18}
+ 95/520 [06:07<27:08, 3.83s/it] {'loss': 10.3386, 'grad_norm': 0.000209791427192916, 'learning_rate': 2.8217784034356637, 'epoch': 0.18}
+ 96/520 [06:11<26:49, 3.80s/it] {'loss': 9.7238, 'grad_norm': 0.00023575669239170386, 'learning_rate': 2.817332360055343, 'epoch': 0.18}
+ 97/520 [06:15<26:40, 3.78s/it] {'loss': 10.5872, 'grad_norm': 0.00014350780021774216, 'learning_rate': 2.812835132863131, 'epoch': 0.19}
+ 98/520 [06:19<26:39, 3.79s/it] {'loss': 9.4732, 'grad_norm': 0.00013117616774106325, 'learning_rate': 2.8082868965949084, 'epoch': 0.19}
+ 99/520 [06:22<26:31, 3.78s/it] {'loss': 10.3455, 'grad_norm': 0.00011134206016786738, 'learning_rate': 2.80368782796847, 'epoch': 0.19}
+ 100/520 [06:26<26:35, 3.80s/it] {'loss': 10.7038, 'grad_norm': 0.00010819054596976613, 'learning_rate': 2.799038105676658, 'epoch': 0.19}
+ 101/520 [06:30<26:48, 3.84s/it] {'loss': 9.8946, 'grad_norm': 0.0001027970756575663, 'learning_rate': 2.7943379103804196, 'epoch': 0.19}
+ 102/520 [06:34<26:49, 3.85s/it] {'loss': 10.3893, 'grad_norm': 0.00011877537137793387, 'learning_rate': 2.7895874247017853, 'epoch': 0.2}
+ 103/520 [06:38<26:55, 3.87s/it] {'loss': 9.3982, 'grad_norm': 0.0001276166070088058, 'learning_rate': 2.7847868332167773, 'epoch': 0.2}
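For plotting or regression checks, step lines in the format used here can be pulled back into structured rows; a small convenience sketch (the regex assumes exactly the "N/520 [elapsed<eta, rate] {...}" layout above and is not part of the original tooling):

```python
import ast
import re

# One match per logged optimizer step: the step counter and the metrics dict.
STEP_RE = re.compile(r"(\d+)/520 \[[^\]]+\] (\{[^}]*\})")

def parse_steps(log_text: str) -> list[dict]:
    """Return one {'step', 'loss', 'grad_norm', 'learning_rate', 'epoch'} dict per step line."""
    rows = []
    for step, metrics in STEP_RE.findall(log_text):
        row = ast.literal_eval(metrics)  # the {'loss': ...} payload is a valid Python literal
        row["step"] = int(step)
        rows.append(row)
    return rows
```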
+ 104/520 [06:42<26:51, 3.87s/it] {'loss': 10.3845, 'grad_norm': 0.0001216121057668544, 'learning_rate': 2.7799363224482336, 'epoch': 0.2}
+ 105/520 [06:46<26:51, 3.88s/it] {'loss': 9.9643, 'grad_norm': 0.00011917255977515452, 'learning_rate': 2.7750360808585635, 'epoch': 0.2}
+ 106/520 [06:50<26:43, 3.87s/it] {'loss': 10.4036, 'grad_norm': 9.239742441667737e-05, 'learning_rate': 2.7700862988424264, 'epoch': 0.2}
+ 107/520 [06:53<26:41, 3.88s/it] {'loss': 10.4324, 'grad_norm': 8.784135260193764e-05, 'learning_rate': 2.7650871687193286, 'epoch': 0.21}
+ 108/520 [06:57<26:34, 3.87s/it] {'loss': 10.2799, 'grad_norm': 0.00011514283207396887, 'learning_rate': 2.7600388847261574, 'epoch': 0.21}
+ 109/520 [07:01<26:34, 3.88s/it] {'loss': 10.8304, 'grad_norm': 0.00011282544716249488, 'learning_rate': 2.7549416430096296, 'epoch': 0.21}
+ 110/520 [07:05<26:30, 3.88s/it] {'loss': 10.2768, 'grad_norm': 9.602689985028653e-05, 'learning_rate': 2.7497956416186735, 'epoch': 0.21}
+ 111/520 [07:09<26:28, 3.88s/it] {'loss': 10.5128, 'grad_norm': 0.00010281642357060434, 'learning_rate': 2.7446010804967313, 'epoch': 0.21}
+ 112/520 [07:13<26:02, 3.83s/it] {'loss': 10.1326, 'grad_norm': 9.724220495311954e-05, 'learning_rate': 2.7393581614739926, 'epoch': 0.22}
+ 113/520 [07:16<25:44, 3.80s/it] {'loss': 9.8427, 'grad_norm': 0.00011555470944362564, 'learning_rate': 2.73406708825955, 'epoch': 0.22}
+ 114/520 [07:20<25:26, 3.76s/it] {'loss': 10.5809, 'grad_norm': 0.00010961609215718965, 'learning_rate': 2.728728066433488, 'epoch': 0.22}
+ 115/520 [07:24<25:14, 3.74s/it] {'loss': 10.7583, 'grad_norm': 0.00010929284842711571, 'learning_rate': 2.7233413034388905, 'epoch': 0.22}
+ 116/520 [07:28<25:09, 3.74s/it] {'loss': 10.1267, 'grad_norm': 9.179930677834964e-05, 'learning_rate': 2.717907008573785, 'epoch': 0.22}
+ 117/520 [07:31<25:05, 3.74s/it] {'loss': 10.3154, 'grad_norm': 9.639368434532905e-05, 'learning_rate': 2.712425392983008, 'epoch': 0.23}
+ 118/520 [07:35<24:51, 3.71s/it] {'loss': 9.7265, 'grad_norm': 0.00010968715804026869, 'learning_rate': 2.7068966696500025, 'epoch': 0.23}
+ 119/520 [07:39<24:44, 3.70s/it] {'loss': 9.5385, 'grad_norm': 0.00013707874731519907, 'learning_rate': 2.701321053388542, 'epoch': 0.23}
+ 120/520 [07:42<24:48, 3.72s/it] {'loss': 10.2141, 'grad_norm': 0.00011473965447234841, 'learning_rate': 2.6956987608343836, 'epoch': 0.23}
+ 121/520 [07:46<25:11, 3.79s/it] {'loss': 9.6181, 'grad_norm': 9.690138236626156e-05, 'learning_rate': 2.690030010436853, 'epoch': 0.23}
+ 122/520 [07:50<25:16, 3.81s/it] {'loss': 9.7133, 'grad_norm': 0.00010005406650734582, 'learning_rate': 2.6843150224503534, 'epoch': 0.23}
+ 123/520 [07:54<25:30, 3.85s/it] {'loss': 10.5249, 'grad_norm': 9.029134135868361e-05, 'learning_rate': 2.6785540189258104, 'epoch': 0.24}
+ 124/520 [07:58<25:27, 3.86s/it] {'loss': 10.6183, 'grad_norm': 0.00012680199605047087, 'learning_rate': 2.6727472237020446, 'epoch': 0.24}
+ 125/520 [08:02<25:39, 3.90s/it] {'loss': 10.067, 'grad_norm': 9.297555606887933e-05, 'learning_rate': 2.666894862397072, 'epoch': 0.24}
+ 126/520 [08:07<26:54, 4.10s/it] {'loss': 9.8247, 'grad_norm': 8.190165310890703e-05, 'learning_rate': 2.660997162399341, 'epoch': 0.24}
+ 127/520 [08:10<26:28, 4.04s/it] {'loss': 10.4814, 'grad_norm': 9.92605657063061e-05, 'learning_rate': 2.6550543528588944, 'epoch': 0.24}
+ 128/520 [08:14<26:05, 3.99s/it] {'loss': 10.2851, 'grad_norm': 8.752693225824027e-05, 'learning_rate': 2.649066664678467, 'epoch': 0.25}
+ 129/520 [08:18<25:52, 3.97s/it] {'loss': 9.398, 'grad_norm': 9.575005430701659e-05, 'learning_rate': 2.6430343305045163, 'epoch': 0.25}
+ 130/520 [08:22<25:29, 3.92s/it] {'loss': 10.3498, 'grad_norm': 8.071417406891545e-05, 'learning_rate': 2.6369575847181794, 'epoch': 0.25}
+ 131/520 [08:26<25:00, 3.86s/it] {'loss': 10.2974, 'grad_norm': 7.401630179102367e-05, 'learning_rate': 2.6308366634261695, 'epoch': 0.25}
+ 132/520 [08:29<24:35, 3.80s/it] {'loss': 10.4677, 'grad_norm': 9.039712244364993e-05, 'learning_rate': 2.6246718044516015, 'epoch': 0.25}
+ 133/520 [08:33<24:20, 3.77s/it] {'loss': 10.3797, 'grad_norm': 9.764301482949564e-05, 'learning_rate': 2.6184632473247484, 'epoch': 0.26}
+ 134/520 [08:37<24:10, 3.76s/it] {'loss': 10.1472, 'grad_norm': 8.746363432183555e-05, 'learning_rate': 2.61221123327374, 'epoch': 0.26}
+ 135/520 [08:41<23:59, 3.74s/it] {'loss': 10.3537, 'grad_norm': 8.424825876247455e-05, 'learning_rate': 2.605916005215186, 'epoch': 0.26}
+ 136/520 [08:44<23:48, 3.72s/it] {'loss': 9.7952, 'grad_norm': 8.200237811568898e-05, 'learning_rate': 2.5995778077447396, 'epoch': 0.26}
+ 137/520 [08:48<23:38, 3.70s/it] {'loss': 10.2543, 'grad_norm': 8.131733339464648e-05, 'learning_rate': 2.5931968871275926, 'epoch': 0.26}
+ 138/520 [08:52<23:29, 3.69s/it] {'loss': 9.7913, 'grad_norm': 7.893410767704809e-05, 'learning_rate': 2.586773491288909, 'epoch': 0.27}
+ 139/520 [08:55<23:30, 3.70s/it] {'loss': 10.1244, 'grad_norm': 7.086300363539078e-05, 'learning_rate': 2.58030786980419, 'epoch': 0.27}
+ 140/520 [08:59<23:35, 3.72s/it] {'loss': 10.4179, 'grad_norm': 6.925456726344542e-05, 'learning_rate': 2.5738002738895776, 'epoch': 0.27}
+ 141/520 [09:03<23:48, 3.77s/it] {'loss': 9.9779, 'grad_norm': 7.366454436209359e-05, 'learning_rate': 2.5672509563920953, 'epoch': 0.27}
+ 142/520 [09:07<23:51, 3.79s/it] {'loss': 10.4042, 'grad_norm': 6.560884165652334e-05, 'learning_rate': 2.560660171779821, 'epoch': 0.27}
+ 143/520 [09:11<23:55, 3.81s/it] {'loss': 10.415, 'grad_norm': 8.134579500017926e-05, 'learning_rate': 2.554028176132004, 'epoch': 0.28}
+ 144/520 [09:14<23:58, 3.83s/it] {'loss': 9.6609, 'grad_norm': 8.815999547695021e-05, 'learning_rate': 2.547355227129109, 'epoch': 0.28}
+ 145/520 [09:18<23:54, 3.83s/it] {'loss': 9.9214, 'grad_norm': 8.332801730046239e-05, 'learning_rate': 2.5406415840428123, 'epoch': 0.28}
+ 146/520 [09:22<23:48, 3.82s/it] {'loss': 10.6453, 'grad_norm': 7.353839039494623e-05, 'learning_rate': 2.5338875077259204, 'epoch': 0.28}
+ 147/520 [09:26<23:47, 3.83s/it] {'loss': 9.6927, 'grad_norm': 8.383659614351632e-05, 'learning_rate': 2.52709326060224, 'epoch': 0.28}
+ 148/520 [09:30<23:41, 3.82s/it] {'loss': 9.8403, 'grad_norm': 7.711918549012039e-05, 'learning_rate': 2.520259106656379, 'epoch': 0.28}
+ 149/520 [09:34<23:40, 3.83s/it] {'loss': 10.0917, 'grad_norm': 8.541314310138262e-05, 'learning_rate': 2.5133853114234905, 'epoch': 0.29}
+ 150/520 [09:37<23:33, 3.82s/it] {'loss': 10.0583, 'grad_norm': 7.06721285344281e-05, 'learning_rate': 2.5064721419789553, 'epoch': 0.29}
+ 151/520 [09:41<23:32, 3.83s/it] {'loss': 9.8109, 'grad_norm': 7.898053624384239e-05, 'learning_rate': 2.499519866928006, 'epoch': 0.29}
+ 152/520 [09:45<23:28, 3.83s/it] {'loss': 10.0365, 'grad_norm': 8.204385188127186e-05, 'learning_rate': 2.492528756395289, 'epoch': 0.29}
+ 153/520 [09:49<23:23, 3.82s/it] {'loss': 9.7376, 'grad_norm': 8.039400618990136e-05, 'learning_rate': 2.4854990820143708, 'epoch': 0.29}
+ 154/520 [09:53<23:17, 3.82s/it] {'loss': 9.9873, 'grad_norm': 8.217337157214352e-05, 'learning_rate': 2.4784311169171818, 'epoch': 0.3}
+ 155/520 [09:56<23:03, 3.79s/it] {'loss': 10.2843, 'grad_norm': 9.14553656551894e-05, 'learning_rate': 2.4713251357234056, 'epoch': 0.3}
+ 156/520 [10:00<22:45, 3.75s/it] {'loss': 10.3136, 'grad_norm': 8.827135077373648e-05, 'learning_rate': 2.4641814145298087, 'epoch': 0.3}
+ 157/520 [10:04<22:29, 3.72s/it] {'loss': 10.7232, 'grad_norm': 7.230683150202043e-05, 'learning_rate': 2.457000230899513, 'epoch': 0.3}
+ 158/520 [10:07<22:18, 3.70s/it] {'loss': 9.8383, 'grad_norm': 7.464506113703111e-05, 'learning_rate': 2.44978186385121, 'epoch': 0.3}
+ 159/520 [10:11<22:08, 3.68s/it] {'loss': 9.7477, 'grad_norm': 7.404145508249825e-05, 'learning_rate': 2.4425265938483207, 'epoch': 0.31}
+ 160/520 [10:15<22:09, 3.69s/it] {'loss': 9.8265, 'grad_norm': 7.133379849251807e-05, 'learning_rate': 2.4352347027881005, 'epoch': 0.31}
+ 161/520 [10:18<21:57, 3.67s/it] {'loss': 9.9873, 'grad_norm': 6.574401949489043e-05, 'learning_rate': 2.4279064739906824, 'epoch': 0.31}
+ 162/520 [10:22<21:52, 3.67s/it] {'loss': 10.6076, 'grad_norm': 6.504169138499642e-05, 'learning_rate': 2.420542192188071, 'epoch': 0.31}
+ 163/520 [10:26<21:52, 3.68s/it] {'loss': 9.8599, 'grad_norm': 7.748115715746033e-05, 'learning_rate': 2.413142143513081, 'epoch': 0.31}
+ 164/520 [10:29<21:47, 3.67s/it] {'loss': 9.7815, 'grad_norm': 8.771077723524491e-05, 'learning_rate': 2.4057066154882163, 'epoch': 0.32}
+ 165/520 [10:33<21:43, 3.67s/it] {'loss': 9.7555, 'grad_norm': 7.832404326464651e-05, 'learning_rate': 2.3982358970145006, 'epoch': 0.32}
+ 166/520 [10:37<21:37, 3.67s/it] {'loss': 9.7091, 'grad_norm': 7.906626978576394e-05, 'learning_rate': 2.390730278360252, 'epoch': 0.32}
+ 167/520 [10:40<21:33, 3.66s/it] {'loss': 10.0943, 'grad_norm': 6.906705570404822e-05, 'learning_rate': 2.383190051149807, 'epoch': 0.32}
+ 168/520 [10:44<21:33, 3.68s/it] {'loss': 9.7185, 'grad_norm': 7.3170681310642e-05, 'learning_rate': 2.375615508352185, 'epoch': 0.32}
+ 169/520 [10:48<21:30, 3.68s/it] {'loss': 10.0352, 'grad_norm': 6.575665468847308e-05, 'learning_rate': 2.368006944269709, 'epoch': 0.33}
+ 170/520 [10:51<21:29, 3.69s/it] {'loss': 10.1561, 'grad_norm': 5.640177412657738e-05, 'learning_rate': 2.360364654526569, 'epoch': 0.33}
+ 171/520 [10:55<21:25, 3.68s/it] {'loss': 9.8128, 'grad_norm': 7.49787772248565e-05, 'learning_rate': 2.352688936057339, 'epoch': 0.33}
+ 172/520 [10:59<21:15, 3.67s/it] {'loss': 9.6902, 'grad_norm': 7.407534189115823e-05, 'learning_rate': 2.3449800870954327, 'epoch': 0.33}
+ 173/520 [11:02<21:15, 3.68s/it] {'loss': 9.7698, 'grad_norm': 8.66381123241094e-05, 'learning_rate': 2.337238407161526, 'epoch': 0.33}
+ 174/520 [11:06<21:11, 3.67s/it] {'loss': 10.206, 'grad_norm': 6.567169565330615e-05, 'learning_rate': 2.3294641970519088, 'epoch': 0.33}
+ 175/520 [11:10<21:04, 3.67s/it] {'loss': 9.7578, 'grad_norm': 7.321465989788596e-05, 'learning_rate': 2.3216577588268072, 'epoch': 0.34}
+ 176/520 [11:13<21:02, 3.67s/it] {'loss': 10.6722, 'grad_norm': 6.657409020002169e-05, 'learning_rate': 2.3138193957986393, 'epoch': 0.34}
+ 177/520 [11:17<20:58, 3.67s/it] {'loss': 10.3753, 'grad_norm': 6.127535355101613e-05, 'learning_rate': 2.3059494125202358, 'epoch': 0.34}
+ 178/520 [11:21<20:58, 3.68s/it] {'loss': 10.0068, 'grad_norm': 6.079024944741335e-05, 'learning_rate': 2.298048114773005, 'epoch': 0.34}
+ 179/520 [11:24<20:52, 3.67s/it] {'loss': 9.891, 'grad_norm': 5.978705921399023e-05, 'learning_rate': 2.290115809555051, 'epoch': 0.34}
+ 180/520 [11:28<20:46, 3.67s/it] {'loss': 10.1345, 'grad_norm': 6.161590391510105e-05, 'learning_rate': 2.282152805069247, 'epoch': 0.35}
+ 181/520 [11:32<20:49, 3.68s/it] {'loss': 9.58, 'grad_norm': 6.754930145935384e-05, 'learning_rate': 2.2741594107112597, 'epoch': 0.35}
+ 182/520 [11:36<20:59, 3.73s/it] {'loss': 10.1593, 'grad_norm': 7.292286290586943e-05, 'learning_rate': 2.2661359370575287, 'epoch': 0.35}
+ 183/520 [11:40<21:11, 3.77s/it] {'loss': 9.6503, 'grad_norm': 7.85308720777007e-05, 'learning_rate': 2.2580826958531963, 'epoch': 0.35}
+ 184/520 [11:43<21:21, 3.81s/it] {'loss': 9.8025, 'grad_norm': 8.283908993374682e-05, 'learning_rate': 2.25, 'epoch': 0.35}
+ 185/520 [11:47<21:11, 3.80s/it] {'loss': 10.3122, 'grad_norm': 6.76249273526292e-05, 'learning_rate': 2.241888163544111, 'epoch': 0.36}
+ 186/520 [11:51<20:58, 3.77s/it] {'loss': 9.7849, 'grad_norm': 6.777780692497931e-05, 'learning_rate': 2.233747501663934, 'epoch': 0.36}
+ 187/520 [11:55<20:53, 3.76s/it] {'loss': 10.3918, 'grad_norm': 6.988214708006185e-05, 'learning_rate': 2.22557833065786, 'epoch': 0.36}
+ 188/520 [11:58<20:38, 3.73s/it] {'loss': 9.6523, 'grad_norm': 6.289887774186618e-05, 'learning_rate': 2.2173809679319776, 'epoch': 0.36}
+ 189/520 [12:02<20:30, 3.72s/it] {'loss': 9.9902, 'grad_norm': 5.5210342048643785e-05, 'learning_rate': 2.2091557319877406, 'epoch': 0.36}
+ 190/520 [12:06<20:21, 3.70s/it] {'loss': 9.9408, 'grad_norm': 6.617251942999136e-05, 'learning_rate': 2.200902942409593, 'epoch': 0.37}
+ 191/520 [12:09<20:15, 3.69s/it] {'loss': 10.1221, 'grad_norm': 7.036121348735045e-05, 'learning_rate': 2.192622919852551, 'epoch': 0.37}
+ 192/520 [12:13<20:11, 3.69s/it] {'loss': 10.335, 'grad_norm': 6.973516626951223e-05, 'learning_rate': 2.1843159860297447, 'epoch': 0.37}
+ 193/520 [12:17<20:10, 3.70s/it] {'loss': 10.4369, 'grad_norm': 6.213406152949562e-05, 'learning_rate': 2.175982463699918, 'epoch': 0.37}
+ 194/520 [12:20<20:07, 3.70s/it] {'loss': 9.8085, 'grad_norm': 5.880889021748336e-05, 'learning_rate': 2.1676226766548883, 'epoch': 0.37}
+ 195/520 [12:24<20:03, 3.70s/it] {'loss': 9.5794, 'grad_norm': 6.337151219766915e-05, 'learning_rate': 2.1592369497069672, 'epoch': 0.38}
+ 196/520 [12:28<19:54, 3.69s/it] {'loss': 9.9864, 'grad_norm': 6.333362442292363e-05, 'learning_rate': 2.150825608676337, 'epoch': 0.38}
+ 197/520 [12:32<19:52, 3.69s/it] {'loss': 9.6656,
'grad_norm': 6.422939691622459e-05, 'learning_rate': 2.142388980378394, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:32<19:52, 3.69s/it] 38%|███▊ | 198/520 [12:35<19:47, 3.69s/it] {'loss': 10.1394, 'grad_norm': 5.821971040857496e-05, 'learning_rate': 2.1339273926110494, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:35<19:47, 3.69s/it] 38%|███▊ | 199/520 [12:39<19:42, 3.68s/it] {'loss': 10.053, 'grad_norm': 6.25198133526983e-05, 'learning_rate': 2.1254411741419923, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:39<19:42, 3.68s/it] 38%|███▊ | 200/520 [12:43<19:38, 3.68s/it] {'loss': 10.2385, 'grad_norm': 5.431309376343447e-05, 'learning_rate': 2.116930654695918, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:43<19:38, 3.68s/it] 39%|███▊ | 201/520 [12:46<19:37, 3.69s/it] {'loss': 9.8498, 'grad_norm': 5.459480011343985e-05, 'learning_rate': 2.1083961649417127, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:46<19:37, 3.69s/it] 39%|███▉ | 202/520 [12:50<19:31, 3.68s/it] {'loss': 10.0279, 'grad_norm': 5.6494396102749394e-05, 'learning_rate': 2.0998380364796114, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:50<19:31, 3.68s/it] 39%|███▉ | 203/520 [12:54<19:40, 3.72s/it] {'loss': 9.8165, 'grad_norm': 5.692265540002101e-05, 'learning_rate': 2.0912566018283094, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:54<19:40, 3.72s/it] 39%|███▉ | 204/520 [12:58<19:52, 3.77s/it] {'loss': 10.0607, 'grad_norm': 5.277287607697074e-05, 'learning_rate': 2.0826521944120424, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:58<19:52, 3.77s/it] 39%|███▉ | 205/520 [13:02<19:58, 3.81s/it] {'loss': 10.1983, 'grad_norm': 4.955731939256991e-05, 'learning_rate': 2.074025148547635, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:02<19:58, 3.81s/it] 40%|███▉ | 206/520 [13:05<20:06, 3.84s/it] {'loss': 10.1472, 'grad_norm': 5.6925895679613656e-05, 'learning_rate': 2.065375799431508, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:05<20:06, 3.84s/it] 40%|███▉ | 207/520 [13:09<20:16, 3.89s/it] {'loss': 10.1257, 'grad_norm': 5.579230132574044e-05, 'learning_rate': 2.0567044831266568, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:09<20:16, 3.89s/it] 40%|████ | 208/520 [13:14<20:32, 3.95s/it] {'loss': 10.1275, 'grad_norm': 6.73878528693841e-05, 'learning_rate': 2.048011536549593, 'epoch': 0.4} + 40%|████ | 208/520 [13:14<20:32, 3.95s/it] 40%|████ | 209/520 [13:18<20:46, 4.01s/it] {'loss': 10.1603, 'grad_norm': 6.332691891287373e-05, 'learning_rate': 2.039297297457251, 'epoch': 0.4} + 40%|████ | 209/520 [13:18<20:46, 4.01s/it] 40%|████ | 210/520 [13:22<20:42, 4.01s/it] {'loss': 9.9018, 'grad_norm': 6.035050782409347e-05, 'learning_rate': 2.030562104433872, 'epoch': 0.4} + 40%|████ | 210/520 [13:22<20:42, 4.01s/it] 41%|████ | 211/520 [13:26<20:21, 3.95s/it] {'loss': 10.0059, 'grad_norm': 5.709735719223742e-05, 'learning_rate': 2.0218062968778407, 'epoch': 0.41} + 41%|████ | 211/520 [13:26<20:21, 3.95s/it] 41%|████ | 212/520 [13:29<19:54, 3.88s/it] {'loss': 9.4007, 'grad_norm': 6.359729478250439e-05, 'learning_rate': 2.013030214988503, 'epoch': 0.41} + 41%|████ | 212/520 [13:29<19:54, 3.88s/it] 41%|████ | 213/520 [13:33<19:42, 3.85s/it] {'loss': 10.5339, 'grad_norm': 7.167021352677699e-05, 'learning_rate': 2.0042341997529465, 'epoch': 0.41} + 41%|████ | 213/520 [13:33<19:42, 3.85s/it] 41%|████ | 214/520 [13:37<19:23, 3.80s/it] {'loss': 9.9295, 'grad_norm': 5.2602227797661806e-05, 'learning_rate': 1.9954185929327508, 'epoch': 0.41} + 41%|████ | 214/520 [13:37<19:23, 3.80s/it] 41%|████▏ | 215/520 [13:40<19:14, 3.79s/it] {'loss': 10.2288, 'grad_norm': 5.602823388872724e-05, 'learning_rate': 1.9865837370507107, 
'epoch': 0.41} + 41%|████▏ | 215/520 [13:40<19:14, 3.79s/it] 42%|████▏ | 216/520 [13:44<19:04, 3.77s/it] {'loss': 9.9015, 'grad_norm': 5.658914964846819e-05, 'learning_rate': 1.9777299753775268, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:44<19:04, 3.77s/it] 42%|████▏ | 217/520 [13:48<18:54, 3.74s/it] {'loss': 9.8376, 'grad_norm': 5.2810477938513355e-05, 'learning_rate': 1.9688576519184668, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:48<18:54, 3.74s/it] 42%|████▏ | 218/520 [13:52<18:45, 3.73s/it] {'loss': 10.4089, 'grad_norm': 6.294336972386576e-05, 'learning_rate': 1.9599671114000015, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:52<18:45, 3.73s/it] 42%|████▏ | 219/520 [13:55<18:47, 3.74s/it] {'loss': 9.3632, 'grad_norm': 6.180010603290399e-05, 'learning_rate': 1.9510586992564094, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:55<18:47, 3.74s/it] 42%|████▏ | 220/520 [13:59<18:38, 3.73s/it] {'loss': 10.2292, 'grad_norm': 5.3371755009910705e-05, 'learning_rate': 1.9421327616163564, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:59<18:38, 3.73s/it] 42%|████▎ | 221/520 [14:03<18:39, 3.74s/it] {'loss': 9.8469, 'grad_norm': 5.718791329327246e-05, 'learning_rate': 1.933189645289445, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:03<18:39, 3.74s/it] 43%|████▎ | 222/520 [14:07<18:41, 3.76s/it] {'loss': 9.601, 'grad_norm': 6.541545512837388e-05, 'learning_rate': 1.9242296977527413, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:07<18:41, 3.76s/it] 43%|████▎ | 223/520 [14:10<18:47, 3.80s/it] {'loss': 9.5643, 'grad_norm': 7.12967919151093e-05, 'learning_rate': 1.915253267137274, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:10<18:47, 3.80s/it] 43%|████▎ | 224/520 [14:14<18:42, 3.79s/it] {'loss': 11.1842, 'grad_norm': 8.19053792758923e-05, 'learning_rate': 1.906260702214508, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:14<18:42, 3.79s/it] 43%|████▎ | 225/520 [14:18<18:44, 3.81s/it] {'loss': 9.6984, 'grad_norm': 6.204965252401541e-05, 'learning_rate': 1.8972523523827909, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:18<18:44, 3.81s/it] 43%|████▎ | 226/520 [14:22<18:41, 3.82s/it] {'loss': 10.0096, 'grad_norm': 5.60795794621493e-05, 'learning_rate': 1.888228567653781, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:22<18:41, 3.82s/it] 44%|████▎ | 227/520 [14:26<18:42, 3.83s/it] {'loss': 9.821, 'grad_norm': 6.610438057351276e-05, 'learning_rate': 1.879189698638846, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:26<18:42, 3.83s/it] 44%|████▍ | 228/520 [14:30<18:34, 3.82s/it] {'loss': 10.8876, 'grad_norm': 6.723304824315037e-05, 'learning_rate': 1.87013609653544, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:30<18:34, 3.82s/it] 44%|████▍ | 229/520 [14:33<18:38, 3.84s/it] {'loss': 9.6881, 'grad_norm': 5.2874153862503544e-05, 'learning_rate': 1.8610681131134597, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:34<18:38, 3.84s/it] 44%|████▍ | 230/520 [14:37<18:33, 3.84s/it] {'loss': 10.0836, 'grad_norm': 6.323879823764522e-05, 'learning_rate': 1.851986100701573, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:37<18:33, 3.84s/it] 44%|████▍ | 231/520 [14:41<18:35, 3.86s/it] {'loss': 9.9022, 'grad_norm': 5.487125364308731e-05, 'learning_rate': 1.8428904121735346, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:41<18:35, 3.86s/it] 45%|████▍ | 232/520 [14:45<18:30, 3.86s/it] {'loss': 10.8417, 'grad_norm': 6.885427234061673e-05, 'learning_rate': 1.8337814009344715, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:45<18:30, 3.86s/it] 45%|████▍ | 233/520 [14:49<18:25, 3.85s/it] {'loss': 10.4946, 'grad_norm': 6.435514542108192e-05, 'learning_rate': 1.8246594209071543, 'epoch': 0.45} + 45%|████▍ | 233/520 
[14:49<18:25, 3.85s/it] 45%|████▌ | 234/520 [14:53<18:20, 3.85s/it] {'loss': 9.657, 'grad_norm': 6.026103420303414e-05, 'learning_rate': 1.8155248265182438, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:53<18:20, 3.85s/it] 45%|████▌ | 235/520 [14:57<18:21, 3.87s/it] {'loss': 9.8612, 'grad_norm': 5.455027895721608e-05, 'learning_rate': 1.8063779726845206, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:57<18:21, 3.87s/it] 45%|████▌ | 236/520 [15:00<18:13, 3.85s/it] {'loss': 10.1968, 'grad_norm': 5.6811024095216764e-05, 'learning_rate': 1.7972192147990964, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:00<18:13, 3.85s/it] 46%|████▌ | 237/520 [15:04<18:05, 3.84s/it] {'loss': 9.8065, 'grad_norm': 6.011007510770893e-05, 'learning_rate': 1.7880489087176046, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:04<18:05, 3.84s/it] 46%|████▌ | 238/520 [15:08<17:48, 3.79s/it] {'loss': 9.6747, 'grad_norm': 7.286371724283247e-05, 'learning_rate': 1.7788674107443723, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:08<17:48, 3.79s/it] 46%|████▌ | 239/520 [15:12<17:40, 3.77s/it] {'loss': 10.3862, 'grad_norm': 7.829042826266348e-05, 'learning_rate': 1.769675077618579, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:12<17:40, 3.77s/it] 46%|████▌ | 240/520 [15:15<17:32, 3.76s/it] {'loss': 9.546, 'grad_norm': 7.982725814809729e-05, 'learning_rate': 1.7604722665003958, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:15<17:32, 3.76s/it] 46%|████▋ | 241/520 [15:19<17:20, 3.73s/it] {'loss': 9.7535, 'grad_norm': 7.173140430418576e-05, 'learning_rate': 1.7512593349571046, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:19<17:20, 3.73s/it] 47%|████▋ | 242/520 [15:23<17:14, 3.72s/it] {'loss': 9.9099, 'grad_norm': 5.987181218286641e-05, 'learning_rate': 1.74203664094921, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:23<17:14, 3.72s/it] 47%|████▋ | 243/520 [15:27<17:12, 3.73s/it] {'loss': 9.6576, 'grad_norm': 5.8593071933103055e-05, 'learning_rate': 1.7328045428165273, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:27<17:12, 3.73s/it] 47%|████▋ | 244/520 [15:30<17:16, 3.75s/it] {'loss': 9.9273, 'grad_norm': 4.8510844738636986e-05, 'learning_rate': 1.7235633992642616, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:30<17:16, 3.75s/it] 47%|████▋ | 245/520 [15:34<17:12, 3.76s/it] {'loss': 9.6462, 'grad_norm': 5.9116012174344684e-05, 'learning_rate': 1.71431356934907, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:34<17:12, 3.76s/it] 47%|████▋ | 246/520 [15:38<17:14, 3.78s/it] {'loss': 10.6176, 'grad_norm': 6.595704632442066e-05, 'learning_rate': 1.7050554124651103, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:38<17:14, 3.78s/it] 48%|████▊ | 247/520 [15:42<17:12, 3.78s/it] {'loss': 10.3215, 'grad_norm': 5.2650983494070355e-05, 'learning_rate': 1.6957892883300776, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:42<17:12, 3.78s/it] 48%|████▊ | 248/520 [15:46<17:11, 3.79s/it] {'loss': 9.73, 'grad_norm': 6.421487354088856e-05, 'learning_rate': 1.686515556971228, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:46<17:11, 3.79s/it] 48%|████▊ | 249/520 [15:49<17:06, 3.79s/it] {'loss': 10.0231, 'grad_norm': 5.436575436376269e-05, 'learning_rate': 1.6772345787113894, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:49<17:06, 3.79s/it] 48%|████▊ | 250/520 [15:53<17:05, 3.80s/it] {'loss': 10.2968, 'grad_norm': 6.299820477532554e-05, 'learning_rate': 1.6679467141549618, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:53<17:05, 3.80s/it] 48%|████▊ | 251/520 [15:57<17:02, 3.80s/it] {'loss': 10.183, 'grad_norm': 5.145785232676886e-05, 'learning_rate': 1.6586523241739068, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:57<17:02, 3.80s/it] 48%|████▊ | 252/520 
[16:01<16:49, 3.77s/it] {'loss': 10.2792, 'grad_norm': 5.1480218943092404e-05, 'learning_rate': 1.649351769893725, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:01<16:49, 3.77s/it] 49%|████▊ | 253/520 [16:04<16:41, 3.75s/it] {'loss': 10.3983, 'grad_norm': 5.934060808996612e-05, 'learning_rate': 1.640045412679426, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:04<16:41, 3.75s/it] 49%|████▉ | 254/520 [16:08<16:32, 3.73s/it] {'loss': 9.7149, 'grad_norm': 5.862834726998992e-05, 'learning_rate': 1.6307336141214877, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:08<16:32, 3.73s/it] 49%|████▉ | 255/520 [16:12<16:39, 3.77s/it] {'loss': 10.116, 'grad_norm': 5.4100744002832094e-05, 'learning_rate': 1.621416736021805, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:12<16:39, 3.77s/it] 49%|████▉ | 256/520 [16:16<16:45, 3.81s/it] {'loss': 9.8135, 'grad_norm': 4.925954481314806e-05, 'learning_rate': 1.6120951403796364, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:16<16:45, 3.81s/it] 49%|████▉ | 257/520 [16:20<16:46, 3.83s/it] {'loss': 9.9424, 'grad_norm': 4.8837464473230914e-05, 'learning_rate': 1.6027691893775349, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:20<16:46, 3.83s/it] 50%|████▉ | 258/520 [16:24<16:49, 3.85s/it] {'loss': 9.8263, 'grad_norm': 4.531528936492314e-05, 'learning_rate': 1.5934392453672783, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:24<16:49, 3.85s/it] 50%|████▉ | 259/520 [16:27<16:49, 3.87s/it] {'loss': 10.3078, 'grad_norm': 5.1279711288024724e-05, 'learning_rate': 1.5841056708557877, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:27<16:49, 3.87s/it] 50%|█████ | 260/520 [16:31<16:49, 3.88s/it] {'loss': 10.2977, 'grad_norm': 4.3984741449638094e-05, 'learning_rate': 1.5747688284910457, 'epoch': 0.5} + 50%|█████ | 260/520 [16:31<16:49, 3.88s/it] 50%|█████ | 261/520 [16:35<16:46, 3.89s/it] {'loss': 10.4988, 'grad_norm': 4.9133694045964084e-05, 'learning_rate': 1.5654290810480043, 'epoch': 0.5} + 50%|█████ | 261/520 [16:35<16:46, 3.89s/it] 50%|█████ | 262/520 [16:39<16:42, 3.89s/it] {'loss': 9.8171, 'grad_norm': 4.821249698777442e-05, 'learning_rate': 1.5560867914144887, 'epoch': 0.5} + 50%|█████ | 262/520 [16:39<16:42, 3.89s/it] 51%|█████ | 263/520 [16:43<16:40, 3.89s/it] {'loss': 10.5531, 'grad_norm': 5.00860460634908e-05, 'learning_rate': 1.5467423225770998, 'epoch': 0.51} + 51%|█████ | 263/520 [16:43<16:40, 3.89s/it] 51%|█████ | 264/520 [16:47<16:37, 3.90s/it] {'loss': 10.1166, 'grad_norm': 5.074071835746491e-05, 'learning_rate': 1.5373960376071094, 'epoch': 0.51} + 51%|█████ | 264/520 [16:47<16:37, 3.90s/it] 51%|█████ | 265/520 [16:51<16:32, 3.89s/it] {'loss': 10.1492, 'grad_norm': 5.6411552680599886e-05, 'learning_rate': 1.5280482996463534, 'epoch': 0.51} + 51%|█████ | 265/520 [16:51<16:32, 3.89s/it] 51%|█████ | 266/520 [16:55<16:30, 3.90s/it] {'loss': 9.1223, 'grad_norm': 6.96655039681953e-05, 'learning_rate': 1.5186994718931226, 'epoch': 0.51} + 51%|█████ | 266/520 [16:55<16:30, 3.90s/it] 51%|█████▏ | 267/520 [16:59<16:27, 3.90s/it] {'loss': 9.6118, 'grad_norm': 5.240150066929794e-05, 'learning_rate': 1.5093499175880503, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:59<16:27, 3.90s/it] 52%|█████▏ | 268/520 [17:03<16:24, 3.91s/it] {'loss': 10.8905, 'grad_norm': 6.383305177270643e-05, 'learning_rate': 1.5, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:03<16:24, 3.91s/it] 52%|█████▏ | 269/520 [17:07<16:19, 3.90s/it] {'loss': 10.044, 'grad_norm': 4.593533248706669e-05, 'learning_rate': 1.4906500824119497, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:07<16:19, 3.90s/it] 52%|█████▏ | 270/520 [17:10<16:13, 3.89s/it] {'loss': 10.0409, 
'grad_norm': 4.144524831618494e-05, 'learning_rate': 1.4813005281068774, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:10<16:13, 3.89s/it] 52%|█████▏ | 271/520 [17:14<16:12, 3.91s/it] {'loss': 10.4004, 'grad_norm': 4.552148278067019e-05, 'learning_rate': 1.4719517003536469, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:14<16:12, 3.91s/it] 52%|█████▏ | 272/520 [17:18<16:12, 3.92s/it] {'loss': 10.4605, 'grad_norm': 4.580946054145702e-05, 'learning_rate': 1.4626039623928908, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:18<16:12, 3.92s/it] 52%|█████▎ | 273/520 [17:22<16:05, 3.91s/it] {'loss': 10.4315, 'grad_norm': 5.13623833807998e-05, 'learning_rate': 1.4532576774229007, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:22<16:05, 3.91s/it] 53%|█████▎ | 274/520 [17:26<16:04, 3.92s/it] {'loss': 9.6641, 'grad_norm': 7.577824833858087e-05, 'learning_rate': 1.4439132085855118, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:26<16:04, 3.92s/it] 53%|█████▎ | 275/520 [17:30<16:01, 3.92s/it] {'loss': 9.9728, 'grad_norm': 6.698730421668178e-05, 'learning_rate': 1.4345709189519962, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:30<16:01, 3.92s/it] 53%|█████▎ | 276/520 [17:34<15:58, 3.93s/it] {'loss': 10.3049, 'grad_norm': 4.479593076076928e-05, 'learning_rate': 1.425231171508954, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:34<15:58, 3.93s/it] 53%|█████▎ | 277/520 [17:38<15:56, 3.94s/it] {'loss': 10.5939, 'grad_norm': 5.1344511903281965e-05, 'learning_rate': 1.415894329144212, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:38<15:56, 3.94s/it] 53%|█████▎ | 278/520 [17:42<15:52, 3.94s/it] {'loss': 9.1993, 'grad_norm': 6.386377230114022e-05, 'learning_rate': 1.406560754632722, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:42<15:52, 3.94s/it] 54%|█████▎ | 279/520 [17:46<15:47, 3.93s/it] {'loss': 10.3744, 'grad_norm': 4.692004248443961e-05, 'learning_rate': 1.3972308106224651, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:46<15:47, 3.93s/it] 54%|█████▍ | 280/520 [17:50<15:41, 3.92s/it] {'loss': 9.7834, 'grad_norm': 5.032655327914491e-05, 'learning_rate': 1.3879048596203636, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:50<15:41, 3.92s/it] 54%|█████▍ | 281/520 [17:54<15:50, 3.98s/it] {'loss': 10.0954, 'grad_norm': 4.6141298843880344e-05, 'learning_rate': 1.378583263978195, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:54<15:50, 3.98s/it] 54%|█████▍ | 282/520 [17:58<15:55, 4.02s/it] {'loss': 9.2938, 'grad_norm': 5.5706291216938556e-05, 'learning_rate': 1.3692663858785126, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:58<15:55, 4.02s/it] 54%|█████▍ | 283/520 [18:02<15:53, 4.02s/it] {'loss': 10.2573, 'grad_norm': 4.563902697867715e-05, 'learning_rate': 1.359954587320574, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:02<15:53, 4.02s/it] 55%|█████▍ | 284/520 [18:06<15:40, 3.99s/it] {'loss': 10.5821, 'grad_norm': 4.9210501512334304e-05, 'learning_rate': 1.3506482301062752, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:06<15:40, 3.99s/it] 55%|█████▍ | 285/520 [18:10<15:28, 3.95s/it] {'loss': 9.6718, 'grad_norm': 5.6489219037794474e-05, 'learning_rate': 1.3413476758260936, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:10<15:28, 3.95s/it] 55%|█████▌ | 286/520 [18:14<15:23, 3.94s/it] {'loss': 9.7298, 'grad_norm': 6.282477662378292e-05, 'learning_rate': 1.3320532858450382, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:14<15:23, 3.94s/it] 55%|█████▌ | 287/520 [18:18<15:15, 3.93s/it] {'loss': 9.9772, 'grad_norm': 4.8538443501534836e-05, 'learning_rate': 1.3227654212886109, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:18<15:15, 3.93s/it] 55%|█████▌ | 288/520 [18:21<15:08, 3.92s/it] {'loss': 
10.4057, 'grad_norm': 4.4981038414531215e-05, 'learning_rate': 1.3134844430287727, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:21<15:08, 3.92s/it] 56%|█████▌ | 289/520 [18:25<15:09, 3.94s/it] {'loss': 9.8365, 'grad_norm': 5.373851416465852e-05, 'learning_rate': 1.3042107116699229, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:25<15:09, 3.94s/it] 56%|█████▌ | 290/520 [18:29<15:01, 3.92s/it] {'loss': 9.3711, 'grad_norm': 5.9903375323964344e-05, 'learning_rate': 1.2949445875348902, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:29<15:01, 3.92s/it] 56%|█████▌ | 291/520 [18:33<14:54, 3.91s/it] {'loss': 9.6627, 'grad_norm': 6.065822954852547e-05, 'learning_rate': 1.2856864306509301, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:33<14:54, 3.91s/it] 56%|█████▌ | 292/520 [18:37<14:51, 3.91s/it] {'loss': 10.0865, 'grad_norm': 5.178539676031537e-05, 'learning_rate': 1.2764366007357382, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:37<14:51, 3.91s/it] 56%|█████▋ | 293/520 [18:41<14:44, 3.90s/it] {'loss': 9.8428, 'grad_norm': 5.776107682060578e-05, 'learning_rate': 1.2671954571834725, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:41<14:44, 3.90s/it] 57%|█████▋ | 294/520 [18:45<14:44, 3.91s/it] {'loss': 10.1925, 'grad_norm': 6.103804573346178e-05, 'learning_rate': 1.25796335905079, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:45<14:44, 3.91s/it] 57%|█████▋ | 295/520 [18:49<14:36, 3.90s/it] {'loss': 10.4849, 'grad_norm': 5.752512464059923e-05, 'learning_rate': 1.2487406650428956, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:49<14:36, 3.90s/it] 57%|█████▋ | 296/520 [18:53<14:33, 3.90s/it] {'loss': 9.371, 'grad_norm': 6.16088778225162e-05, 'learning_rate': 1.2395277334996044, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:53<14:33, 3.90s/it] 57%|█████▋ | 297/520 [18:56<14:25, 3.88s/it] {'loss': 10.0262, 'grad_norm': 5.418452446850441e-05, 'learning_rate': 1.230324922381421, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:57<14:25, 3.88s/it] 57%|█████▋ | 298/520 [19:00<14:20, 3.88s/it] {'loss': 9.579, 'grad_norm': 5.703510991563425e-05, 'learning_rate': 1.2211325892556282, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:00<14:20, 3.88s/it] 57%|█████▊ | 299/520 [19:04<14:19, 3.89s/it] {'loss': 10.5018, 'grad_norm': 4.544214472274147e-05, 'learning_rate': 1.2119510912823959, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:04<14:19, 3.89s/it] 58%|█████▊ | 300/520 [19:08<14:15, 3.89s/it] {'loss': 10.0673, 'grad_norm': 4.5081010281305664e-05, 'learning_rate': 1.202780785200904, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:08<14:15, 3.89s/it] 58%|█████▊ | 301/520 [19:12<14:13, 3.90s/it] {'loss': 9.7078, 'grad_norm': 4.995062344102544e-05, 'learning_rate': 1.1936220273154796, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:12<14:13, 3.90s/it] 58%|█████▊ | 302/520 [19:16<14:10, 3.90s/it] {'loss': 10.4813, 'grad_norm': 4.6444912371722455e-05, 'learning_rate': 1.1844751734817565, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:16<14:10, 3.90s/it] 58%|█████▊ | 303/520 [19:20<14:05, 3.90s/it] {'loss': 9.7613, 'grad_norm': 5.272872563542495e-05, 'learning_rate': 1.1753405790928457, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:20<14:05, 3.90s/it] 58%|█████▊ | 304/520 [19:24<14:05, 3.91s/it] {'loss': 10.6095, 'grad_norm': 5.9685939002665386e-05, 'learning_rate': 1.1662185990655285, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:24<14:05, 3.91s/it] 59%|█████▊ | 305/520 [19:28<14:01, 3.91s/it] {'loss': 10.3075, 'grad_norm': 5.0827948592123694e-05, 'learning_rate': 1.157109587826466, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:28<14:01, 3.91s/it] 59%|█████▉ | 306/520 [19:32<13:58, 3.92s/it] {'loss': 
10.0367, 'grad_norm': 4.511999630242323e-05, 'learning_rate': 1.1480138992984275, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:32<13:58, 3.92s/it] 59%|█████▉ | 307/520 [19:36<13:55, 3.92s/it] {'loss': 9.7276, 'grad_norm': 4.614458293432366e-05, 'learning_rate': 1.1389318868865408, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:36<13:55, 3.92s/it] 59%|█████▉ | 308/520 [19:39<13:34, 3.84s/it] {'loss': 9.7794, 'grad_norm': 4.4732395923292325e-05, 'learning_rate': 1.1298639034645594, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:39<13:34, 3.84s/it] 59%|█████▉ | 309/520 [19:44<14:08, 4.02s/it] {'loss': 9.432, 'grad_norm': 5.395840489537194e-05, 'learning_rate': 1.1208103013611534, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:44<14:08, 4.02s/it] 60%|█████▉ | 310/520 [19:47<13:41, 3.91s/it] {'loss': 9.7521, 'grad_norm': 4.913576998089485e-05, 'learning_rate': 1.1117714323462187, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:47<13:41, 3.91s/it] 60%|█████▉ | 311/520 [19:51<13:21, 3.84s/it] {'loss': 9.8438, 'grad_norm': 4.959387552134407e-05, 'learning_rate': 1.1027476476172091, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:51<13:21, 3.84s/it] 60%|██████ | 312/520 [19:55<13:32, 3.91s/it] {'loss': 9.7684, 'grad_norm': 5.70690456646586e-05, 'learning_rate': 1.0937392977854925, 'epoch': 0.6} + 60%|██████ | 312/520 [19:55<13:32, 3.91s/it] 60%|██████ | 313/520 [19:59<13:14, 3.84s/it] {'loss': 8.9776, 'grad_norm': 7.283876817644342e-05, 'learning_rate': 1.084746732862726, 'epoch': 0.6} + 60%|██████ | 313/520 [19:59<13:14, 3.84s/it] 60%|██████ | 314/520 [20:03<13:31, 3.94s/it] {'loss': 9.8284, 'grad_norm': 4.62236363196147e-05, 'learning_rate': 1.0757703022472587, 'epoch': 0.6} + 60%|██████ | 314/520 [20:03<13:31, 3.94s/it] 61%|██████ | 315/520 [20:07<13:10, 3.86s/it] {'loss': 10.7576, 'grad_norm': 6.0035111581277004e-05, 'learning_rate': 1.0668103547105554, 'epoch': 0.61} + 61%|██████ | 315/520 [20:07<13:10, 3.86s/it] 61%|██████ | 316/520 [20:10<12:56, 3.81s/it] {'loss': 9.9921, 'grad_norm': 5.30578380660585e-05, 'learning_rate': 1.0578672383836436, 'epoch': 0.61} + 61%|██████ | 316/520 [20:10<12:56, 3.81s/it] 61%|██████ | 317/520 [20:14<12:47, 3.78s/it] {'loss': 9.108, 'grad_norm': 6.253046060733045e-05, 'learning_rate': 1.0489413007435906, 'epoch': 0.61} + 61%|██████ | 317/520 [20:14<12:47, 3.78s/it] 61%|██████ | 318/520 [20:18<12:39, 3.76s/it] {'loss': 10.487, 'grad_norm': 7.518194158430277e-05, 'learning_rate': 1.0400328885999988, 'epoch': 0.61} + 61%|██████ | 318/520 [20:18<12:39, 3.76s/it] 61%|██████▏ | 319/520 [20:22<12:57, 3.87s/it] {'loss': 9.3129, 'grad_norm': 5.639492835452657e-05, 'learning_rate': 1.0311423480815334, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:22<12:57, 3.87s/it] 62%|██████▏ | 320/520 [20:26<12:43, 3.82s/it] {'loss': 10.0075, 'grad_norm': 4.4814238531112066e-05, 'learning_rate': 1.0222700246224736, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:26<12:43, 3.82s/it] 62%|██████▏ | 321/520 [20:29<12:30, 3.77s/it] {'loss': 9.7567, 'grad_norm': 3.98937682531782e-05, 'learning_rate': 1.0134162629492895, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:29<12:30, 3.77s/it] 62%|██████▏ | 322/520 [20:33<12:30, 3.79s/it] {'loss': 10.4931, 'grad_norm': 4.231096482804244e-05, 'learning_rate': 1.0045814070672499, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:33<12:30, 3.79s/it] 62%|██████▏ | 323/520 [20:37<12:33, 3.83s/it] {'loss': 10.5669, 'grad_norm': 4.473580474931313e-05, 'learning_rate': 0.9957658002470542, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:37<12:33, 3.83s/it] 62%|██████▏ | 324/520 [20:41<12:34, 3.85s/it] {'loss': 
9.6926, 'grad_norm': 5.079125028163663e-05, 'learning_rate': 0.986969785011497, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:41<12:34, 3.85s/it] 62%|██████▎ | 325/520 [20:45<12:31, 3.86s/it] {'loss': 10.053, 'grad_norm': 5.085478570139476e-05, 'learning_rate': 0.978193703122159, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:45<12:31, 3.86s/it] 63%|██████▎ | 326/520 [20:49<12:30, 3.87s/it] {'loss': 10.1497, 'grad_norm': 4.373455128437636e-05, 'learning_rate': 0.9694378955661279, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:49<12:30, 3.87s/it] 63%|██████▎ | 327/520 [20:52<12:26, 3.87s/it] {'loss': 10.641, 'grad_norm': 4.7327054891249494e-05, 'learning_rate': 0.9607027025427487, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:53<12:26, 3.87s/it] 63%|██████▎ | 328/520 [20:56<12:23, 3.87s/it] {'loss': 10.1119, 'grad_norm': 4.17912908199982e-05, 'learning_rate': 0.9519884634504074, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:56<12:23, 3.87s/it] 63%|██████▎ | 329/520 [21:00<12:24, 3.90s/it] {'loss': 9.2338, 'grad_norm': 5.643089137722897e-05, 'learning_rate': 0.9432955168733431, 'epoch': 0.63} + 63%|██████▎ | 329/520 [21:00<12:24, 3.90s/it] 63%|██████▎ | 330/520 [21:04<12:22, 3.91s/it] {'loss': 9.8378, 'grad_norm': 4.072108378560551e-05, 'learning_rate': 0.9346242005684922, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:04<12:22, 3.91s/it] 64%|██████▎ | 331/520 [21:08<12:18, 3.91s/it] {'loss': 9.9662, 'grad_norm': 4.339506152400585e-05, 'learning_rate': 0.9259748514523654, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:08<12:18, 3.91s/it] 64%|██████▍ | 332/520 [21:12<12:14, 3.91s/it] {'loss': 10.4318, 'grad_norm': 4.036637560802298e-05, 'learning_rate': 0.917347805587958, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:12<12:14, 3.91s/it] 64%|██████▍ | 333/520 [21:16<12:01, 3.86s/it] {'loss': 10.198, 'grad_norm': 4.22747168837384e-05, 'learning_rate': 0.9087433981716911, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:16<12:01, 3.86s/it] 64%|██████▍ | 334/520 [21:20<11:48, 3.81s/it] {'loss': 9.7457, 'grad_norm': 3.943076000595254e-05, 'learning_rate': 0.9001619635203888, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:20<11:48, 3.81s/it] 64%|██████▍ | 335/520 [21:23<11:40, 3.79s/it] {'loss': 9.6984, 'grad_norm': 3.7690755876469645e-05, 'learning_rate': 0.8916038350582877, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:23<11:40, 3.79s/it] 65%|██████▍ | 336/520 [21:27<11:32, 3.76s/it] {'loss': 9.7996, 'grad_norm': 4.482862693676419e-05, 'learning_rate': 0.883069345304083, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:27<11:32, 3.76s/it] 65%|██████▍ | 337/520 [21:31<11:27, 3.76s/it] {'loss': 10.1968, 'grad_norm': 4.9575023854145065e-05, 'learning_rate': 0.8745588258580084, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:31<11:27, 3.76s/it] 65%|██████▌ | 338/520 [21:35<11:27, 3.78s/it] {'loss': 9.9899, 'grad_norm': 4.4605038237175515e-05, 'learning_rate': 0.8660726073889511, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:35<11:27, 3.78s/it] 65%|██████▌ | 339/520 [21:38<11:22, 3.77s/it] {'loss': 10.156, 'grad_norm': 4.319236879486779e-05, 'learning_rate': 0.8576110196216057, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:38<11:22, 3.77s/it] 65%|██████▌ | 340/520 [21:42<11:15, 3.75s/it] {'loss': 9.7382, 'grad_norm': 4.4547604513293914e-05, 'learning_rate': 0.8491743913236629, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:42<11:15, 3.75s/it] 66%|██████▌ | 341/520 [21:46<11:10, 3.74s/it] {'loss': 9.809, 'grad_norm': 4.1554594180727754e-05, 'learning_rate': 0.8407630502930323, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:46<11:10, 3.74s/it] 66%|██████▌ | 342/520 
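The learning_rate values above trace a smooth curve from 2.679 toward zero, consistent with a cosine decay following a short linear warmup. A minimal sanity check, assuming a peak rate of 3.0, 16 warmup steps, and 520 total steps; all three parameters are inferred from the logged numbers rather than read from any config:

import math

def cosine_lr(step, peak=3.0, warmup=16, total=520):
    # Linear warmup, then cosine decay to zero over the remaining steps.
    if step < warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return 0.5 * peak * (1.0 + math.cos(math.pi * progress))

for s in (184, 268, 352):
    print(s, round(cosine_lr(s), 4))  # 2.25, 1.5, 0.75

The round values logged at steps 184, 268, and 352 (2.25, 1.5, 0.75) fall exactly on this curve, which supports the inferred schedule.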
[Steps 342-347: loss 9.12-10.77; grad_norm 4.0e-05 to 7.7e-05; learning_rate 0.832 down to 0.791; epoch 0.66-0.67.]
Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
[Steps 348-358: loss 9.22-10.54; grad_norm 3.8e-05 to 6.9e-05; learning_rate 0.783 down to 0.702; epoch 0.67-0.69.]
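The "Token indices sequence length" warning above (2778 > 2048 here, with a second occurrence of 2076 > 2048 after step 429 below) means some samples exceed the 2048-token model_max_length and would overflow the position range if fed through unchanged. A hedged illustration of the usual tokenizer-side mitigation, assuming the Qwen2.5-0.5B tokenizer named in this run's log file; long_text is a stand-in sample:

from transformers import AutoTokenizer

# Tokenizer name assumed from the run name; substitute the actual checkpoint.
tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
long_text = "hello world " * 2000  # stand-in for an over-length training sample
enc = tok(long_text, truncation=True, max_length=2048)
print(len(enc["input_ids"]))  # capped at 2048, so downstream indexing stays in range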
69%|██████▉ | 359/520 [22:53<09:57, 3.71s/it] {'loss': 10.4588, 'grad_norm': 3.913926580428022e-05, 'learning_rate': 0.694050587479764, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:53<09:57, 3.71s/it] 69%|██████▉ | 360/520 [22:56<09:55, 3.72s/it] {'loss': 10.6809, 'grad_norm': 4.562460366482772e-05, 'learning_rate': 0.686180604201361, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:56<09:55, 3.72s/it] 69%|██████▉ | 361/520 [23:00<09:49, 3.71s/it] {'loss': 10.1261, 'grad_norm': 3.992836871827195e-05, 'learning_rate': 0.6783422411731932, 'epoch': 0.69} + 69%|██████▉ | 361/520 [23:00<09:49, 3.71s/it] 70%|██████▉ | 362/520 [23:04<09:44, 3.70s/it] {'loss': 9.752, 'grad_norm': 6.672422885727625e-05, 'learning_rate': 0.6705358029480908, 'epoch': 0.7} + 70%|██████▉ | 362/520 [23:04<09:44, 3.70s/it] 70%|██████▉ | 363/520 [23:07<09:38, 3.69s/it] {'loss': 9.9264, 'grad_norm': 4.4553867400002314e-05, 'learning_rate': 0.6627615928384742, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:07<09:38, 3.69s/it] 70%|███████ | 364/520 [23:11<09:35, 3.69s/it] {'loss': 10.5736, 'grad_norm': 4.802292444572169e-05, 'learning_rate': 0.6550199129045668, 'epoch': 0.7} + 70%|███████ | 364/520 [23:11<09:35, 3.69s/it] 70%|███████ | 365/520 [23:15<09:32, 3.69s/it] {'loss': 9.9885, 'grad_norm': 4.376206429883255e-05, 'learning_rate': 0.6473110639426617, 'epoch': 0.7} + 70%|███████ | 365/520 [23:15<09:32, 3.69s/it] 70%|███████ | 366/520 [23:19<09:27, 3.69s/it] {'loss': 9.8912, 'grad_norm': 4.741785044891366e-05, 'learning_rate': 0.6396353454734313, 'epoch': 0.7} + 70%|███████ | 366/520 [23:19<09:27, 3.69s/it] 71%|███████ | 367/520 [23:22<09:29, 3.73s/it] {'loss': 10.1651, 'grad_norm': 5.454175901936492e-05, 'learning_rate': 0.6319930557302914, 'epoch': 0.71} + 71%|███████ | 367/520 [23:22<09:29, 3.73s/it] 71%|███████ | 368/520 [23:26<09:22, 3.70s/it] {'loss': 9.7195, 'grad_norm': 5.3507325657735676e-05, 'learning_rate': 0.6243844916478156, 'epoch': 0.71} + 71%|███████ | 368/520 [23:26<09:22, 3.70s/it] 71%|███████ | 369/520 [23:30<09:19, 3.71s/it] {'loss': 9.8623, 'grad_norm': 4.0442392311334186e-05, 'learning_rate': 0.616809948850193, 'epoch': 0.71} + 71%|███████ | 369/520 [23:30<09:19, 3.71s/it] 71%|███████ | 370/520 [23:33<09:15, 3.70s/it] {'loss': 9.62, 'grad_norm': 4.31776485738304e-05, 'learning_rate': 0.6092697216397478, 'epoch': 0.71} + 71%|███████ | 370/520 [23:33<09:15, 3.70s/it] 71%|███████▏ | 371/520 [23:37<09:11, 3.70s/it] {'loss': 10.0565, 'grad_norm': 4.254713304087041e-05, 'learning_rate': 0.6017641029854996, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:37<09:11, 3.70s/it] 72%|███████▏ | 372/520 [23:41<09:07, 3.70s/it] {'loss': 10.5017, 'grad_norm': 3.817479662106257e-05, 'learning_rate': 0.5942933845117836, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:41<09:07, 3.70s/it] 72%|███████▏ | 373/520 [23:45<09:06, 3.72s/it] {'loss': 10.496, 'grad_norm': 4.2784942132854644e-05, 'learning_rate': 0.586857856486919, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:45<09:06, 3.72s/it] 72%|███████▏ | 374/520 [23:48<09:01, 3.71s/it] {'loss': 9.8442, 'grad_norm': 4.0486458781723676e-05, 'learning_rate': 0.5794578078119291, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:48<09:01, 3.71s/it] 72%|███████▏ | 375/520 [23:52<08:59, 3.72s/it] {'loss': 9.5874, 'grad_norm': 4.851655374743815e-05, 'learning_rate': 0.5720935260093177, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:52<08:59, 3.72s/it] 72%|███████▏ | 376/520 [23:56<08:54, 3.71s/it] {'loss': 9.6926, 'grad_norm': 4.167279809484334e-05, 'learning_rate': 0.5647652972118997, 'epoch': 0.72} + 
72%|███████▏ | 376/520 [23:56<08:54, 3.71s/it] 72%|███████▎ | 377/520 [23:59<08:51, 3.72s/it] {'loss': 9.9905, 'grad_norm': 4.3767945130290636e-05, 'learning_rate': 0.5574734061516791, 'epoch': 0.72} + 72%|███████▎ | 377/520 [23:59<08:51, 3.72s/it] 73%|███████▎ | 378/520 [24:03<08:49, 3.73s/it] {'loss': 10.0777, 'grad_norm': 4.072366519651331e-05, 'learning_rate': 0.5502181361487904, 'epoch': 0.73} + 73%|███████▎ | 378/520 [24:03<08:49, 3.73s/it] 73%|███████▎ | 379/520 [24:07<08:44, 3.72s/it] {'loss': 9.9181, 'grad_norm': 4.1359686110406514e-05, 'learning_rate': 0.5429997691004873, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:07<08:44, 3.72s/it] 73%|███████▎ | 380/520 [24:11<08:40, 3.72s/it] {'loss': 10.2615, 'grad_norm': 3.949281875308695e-05, 'learning_rate': 0.5358185854701909, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:11<08:40, 3.72s/it] 73%|███████▎ | 381/520 [24:14<08:36, 3.72s/it] {'loss': 9.7875, 'grad_norm': 4.4824407327537154e-05, 'learning_rate': 0.5286748642765946, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:14<08:36, 3.72s/it] 73%|███████▎ | 382/520 [24:18<08:40, 3.77s/it] {'loss': 10.5365, 'grad_norm': 4.971427338701336e-05, 'learning_rate': 0.5215688830828187, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:18<08:40, 3.77s/it] 74%|███████▎ | 383/520 [24:22<08:41, 3.81s/it] {'loss': 10.0533, 'grad_norm': 7.461270046334062e-05, 'learning_rate': 0.5145009179856295, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:22<08:41, 3.81s/it] 74%|███████▍ | 384/520 [24:26<08:44, 3.86s/it] {'loss': 10.9661, 'grad_norm': 6.166520943502881e-05, 'learning_rate': 0.5074712436047113, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:26<08:44, 3.86s/it] 74%|███████▍ | 385/520 [24:30<08:42, 3.87s/it] {'loss': 9.726, 'grad_norm': 4.830830002765853e-05, 'learning_rate': 0.5004801330719941, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:30<08:42, 3.87s/it] 74%|███████▍ | 386/520 [24:34<08:41, 3.89s/it] {'loss': 9.3652, 'grad_norm': 5.567292146128137e-05, 'learning_rate': 0.4935278580210451, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:34<08:41, 3.89s/it] 74%|███████▍ | 387/520 [24:38<08:37, 3.89s/it] {'loss': 10.6511, 'grad_norm': 4.9781632824036605e-05, 'learning_rate': 0.48661468857650964, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:38<08:37, 3.89s/it] 75%|███████▍ | 388/520 [24:42<08:33, 3.89s/it] {'loss': 9.6682, 'grad_norm': 6.236083748148761e-05, 'learning_rate': 0.47974089334362063, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:42<08:33, 3.89s/it] 75%|███████▍ | 389/520 [24:46<08:31, 3.90s/it] {'loss': 10.2966, 'grad_norm': 7.099465024192245e-05, 'learning_rate': 0.47290673939775973, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:46<08:31, 3.90s/it] 75%|███████▌ | 390/520 [24:50<08:28, 3.91s/it] {'loss': 9.8237, 'grad_norm': 5.59913259968097e-05, 'learning_rate': 0.46611249227407947, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:50<08:28, 3.91s/it] 75%|███████▌ | 391/520 [24:53<08:23, 3.90s/it] {'loss': 10.1451, 'grad_norm': 5.625372704012189e-05, 'learning_rate': 0.4593584159571875, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:53<08:23, 3.90s/it] 75%|███████▌ | 392/520 [24:57<08:19, 3.90s/it] {'loss': 9.6681, 'grad_norm': 7.561397177149049e-05, 'learning_rate': 0.4526447728708909, 'epoch': 0.75} + 75%|███████▌ | 392/520 [24:57<08:19, 3.90s/it] 76%|███████▌ | 393/520 [25:01<08:14, 3.90s/it] {'loss': 10.0373, 'grad_norm': 4.804072515418898e-05, 'learning_rate': 0.4459718238679963, 'epoch': 0.76} + 76%|███████▌ | 393/520 [25:01<08:14, 3.90s/it] 76%|███████▌ | 394/520 [25:05<08:08, 3.87s/it] {'loss': 9.8419, 
'grad_norm': 8.058535682511549e-05, 'learning_rate': 0.4393398282201788, 'epoch': 0.76} + 76%|███████▌ | 394/520 [25:05<08:08, 3.87s/it] 76%|███████▌ | 395/520 [25:09<07:55, 3.80s/it] {'loss': 9.8275, 'grad_norm': 0.00013702392046380493, 'learning_rate': 0.4327490436079051, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:09<07:55, 3.80s/it] 76%|███████▌ | 396/520 [25:12<07:46, 3.76s/it] {'loss': 10.0127, 'grad_norm': 0.00013460440947972048, 'learning_rate': 0.42619972611042234, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:12<07:46, 3.76s/it] 76%|███████▋ | 397/520 [25:16<07:39, 3.73s/it] {'loss': 9.9165, 'grad_norm': 0.0001523533238757869, 'learning_rate': 0.4196921301958104, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:16<07:39, 3.73s/it] 77%|███████▋ | 398/520 [25:20<07:32, 3.71s/it] {'loss': 10.3027, 'grad_norm': 0.0002618189845846958, 'learning_rate': 0.413226508711091, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:20<07:32, 3.71s/it] 77%|███████▋ | 399/520 [25:23<07:28, 3.71s/it] {'loss': 10.2426, 'grad_norm': 9.063381033938206e-05, 'learning_rate': 0.4068031128724075, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:23<07:28, 3.71s/it] 77%|███████▋ | 400/520 [25:27<07:26, 3.72s/it] {'loss': 10.174, 'grad_norm': 6.405753844526857e-05, 'learning_rate': 0.4004221922552608, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:27<07:26, 3.72s/it] 77%|███████▋ | 401/520 [25:31<07:27, 3.76s/it] {'loss': 9.0905, 'grad_norm': 7.915524564521251e-05, 'learning_rate': 0.39408399478481404, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:31<07:27, 3.76s/it] 77%|███████▋ | 402/520 [25:35<07:27, 3.79s/it] {'loss': 9.6322, 'grad_norm': 7.687026545054547e-05, 'learning_rate': 0.3877887667262599, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:35<07:27, 3.79s/it] 78%|███████▊ | 403/520 [25:39<07:20, 3.77s/it] {'loss': 9.633, 'grad_norm': 6.839669377489933e-05, 'learning_rate': 0.3815367526752516, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:39<07:20, 3.77s/it] 78%|███████▊ | 404/520 [25:42<07:12, 3.73s/it] {'loss': 9.7952, 'grad_norm': 9.676478489019697e-05, 'learning_rate': 0.3753281955483985, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:42<07:12, 3.73s/it] 78%|███████▊ | 405/520 [25:46<07:10, 3.74s/it] {'loss': 9.9907, 'grad_norm': 6.894617740626535e-05, 'learning_rate': 0.36916333657383027, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:46<07:10, 3.74s/it] 78%|███████▊ | 406/520 [25:50<07:04, 3.72s/it] {'loss': 10.4651, 'grad_norm': 0.00010888711176609595, 'learning_rate': 0.3630424152818203, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:50<07:04, 3.72s/it] 78%|███████▊ | 407/520 [25:53<07:00, 3.72s/it] {'loss': 10.2502, 'grad_norm': 7.275938006737799e-05, 'learning_rate': 0.3569656694954838, 'epoch': 0.78} + 78%|███████▊ | 407/520 [25:53<07:00, 3.72s/it] 78%|███████▊ | 408/520 [25:57<06:55, 3.71s/it] {'loss': 9.7035, 'grad_norm': 7.4905912911696e-05, 'learning_rate': 0.35093333532153315, 'epoch': 0.78} + 78%|███████▊ | 408/520 [25:57<06:55, 3.71s/it] 79%|███████▊ | 409/520 [26:01<06:50, 3.70s/it] {'loss': 10.3126, 'grad_norm': 6.876422512555151e-05, 'learning_rate': 0.3449456471411058, 'epoch': 0.79} + 79%|███████▊ | 409/520 [26:01<06:50, 3.70s/it] 79%|███████▉ | 410/520 [26:04<06:46, 3.70s/it] {'loss': 9.5713, 'grad_norm': 8.87325425465162e-05, 'learning_rate': 0.3390028376006589, 'epoch': 0.79} + 79%|███████▉ | 410/520 [26:04<06:46, 3.70s/it] 79%|███████▉ | 411/520 [26:08<06:43, 3.70s/it] {'loss': 9.982, 'grad_norm': 6.641688966241039e-05, 'learning_rate': 0.33310513760292787, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:08<06:43, 
3.70s/it] 79%|███████▉ | 412/520 [26:12<06:39, 3.70s/it] {'loss': 9.9185, 'grad_norm': 7.010670534291414e-05, 'learning_rate': 0.3272527762979553, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:12<06:39, 3.70s/it] 79%|███████▉ | 413/520 [26:15<06:35, 3.70s/it] {'loss': 10.7426, 'grad_norm': 6.206220004887343e-05, 'learning_rate': 0.32144598107418976, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:15<06:35, 3.70s/it] 80%|███████▉ | 414/520 [26:19<06:33, 3.71s/it] {'loss': 9.8102, 'grad_norm': 6.621516982064358e-05, 'learning_rate': 0.31568497754964703, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:19<06:33, 3.71s/it] 80%|███████▉ | 415/520 [26:23<06:30, 3.72s/it] {'loss': 9.4384, 'grad_norm': 6.949722244600259e-05, 'learning_rate': 0.3099699895631474, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:23<06:30, 3.72s/it] 80%|████████ | 416/520 [26:27<06:24, 3.70s/it] {'loss': 10.2527, 'grad_norm': 9.82681736680637e-05, 'learning_rate': 0.30430123916561674, 'epoch': 0.8} + 80%|████████ | 416/520 [26:27<06:24, 3.70s/it] 80%|████████ | 417/520 [26:30<06:20, 3.70s/it] {'loss': 9.6593, 'grad_norm': 5.3458460973388925e-05, 'learning_rate': 0.2986789466114582, 'epoch': 0.8} + 80%|████████ | 417/520 [26:30<06:20, 3.70s/it] 80%|████████ | 418/520 [26:34<06:16, 3.69s/it] {'loss': 9.8401, 'grad_norm': 5.17475706219772e-05, 'learning_rate': 0.29310333034999747, 'epoch': 0.8} + 80%|████████ | 418/520 [26:34<06:16, 3.69s/it] 81%|████████ | 419/520 [26:38<06:10, 3.67s/it] {'loss': 10.2286, 'grad_norm': 6.952665233974762e-05, 'learning_rate': 0.28757460701699217, 'epoch': 0.81} + 81%|████████ | 419/520 [26:38<06:10, 3.67s/it] 81%|████████ | 420/520 [26:41<06:11, 3.71s/it] {'loss': 9.8361, 'grad_norm': 6.715194703694331e-05, 'learning_rate': 0.28209299142621524, 'epoch': 0.81} + 81%|████████ | 420/520 [26:41<06:11, 3.71s/it] 81%|████████ | 421/520 [26:45<06:13, 3.77s/it] {'loss': 10.0268, 'grad_norm': 8.646884895023166e-05, 'learning_rate': 0.27665869656110975, 'epoch': 0.81} + 81%|████████ | 421/520 [26:45<06:13, 3.77s/it] 81%|████████ | 422/520 [26:49<06:08, 3.76s/it] {'loss': 9.6917, 'grad_norm': 5.678558137824703e-05, 'learning_rate': 0.27127193356651214, 'epoch': 0.81} + 81%|████████ | 422/520 [26:49<06:08, 3.76s/it] 81%|████████▏ | 423/520 [26:53<06:00, 3.72s/it] {'loss': 10.409, 'grad_norm': 6.231027351180074e-05, 'learning_rate': 0.26593291174045, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:53<06:00, 3.72s/it] 82%|████████▏ | 424/520 [26:56<05:54, 3.70s/it] {'loss': 10.3211, 'grad_norm': 4.067851706796891e-05, 'learning_rate': 0.260641838526008, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:56<05:54, 3.70s/it] 82%|████████▏ | 425/520 [27:00<05:50, 3.69s/it] {'loss': 9.5248, 'grad_norm': 5.316497823081624e-05, 'learning_rate': 0.25539891950326876, 'epoch': 0.82} + 82%|████████▏ | 425/520 [27:00<05:50, 3.69s/it] 82%|████████▏ | 426/520 [27:04<05:45, 3.68s/it] {'loss': 10.3604, 'grad_norm': 4.722169210881869e-05, 'learning_rate': 0.25020435838132676, 'epoch': 0.82} + 82%|████████▏ | 426/520 [27:04<05:45, 3.68s/it] 82%|████████▏ | 427/520 [27:07<05:41, 3.68s/it] {'loss': 9.2914, 'grad_norm': 6.016372442698576e-05, 'learning_rate': 0.24505835699037037, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:07<05:41, 3.68s/it] 82%|████████▏ | 428/520 [27:11<05:36, 3.66s/it] {'loss': 9.4832, 'grad_norm': 5.99457148225717e-05, 'learning_rate': 0.2399611152738429, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:11<05:36, 3.66s/it] 82%|████████▎ | 429/520 [27:15<05:33, 3.67s/it] {'loss': 9.8944, 'grad_norm': 4.8060594746325774e-05, 
'learning_rate': 0.23491283128067175, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:15<05:33, 3.67s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:18<05:30, 3.67s/it] {'loss': 9.009, 'grad_norm': 6.251898785485568e-05, 'learning_rate': 0.2299137011575738, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:18<05:30, 3.67s/it] 83%|████████▎ | 431/520 [27:22<05:29, 3.71s/it] {'loss': 10.4382, 'grad_norm': 4.127190080510619e-05, 'learning_rate': 0.22496391914143632, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:22<05:29, 3.71s/it] 83%|████████▎ | 432/520 [27:26<05:25, 3.70s/it] {'loss': 9.6187, 'grad_norm': 5.254953756501855e-05, 'learning_rate': 0.2200636775517666, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:26<05:25, 3.70s/it] 83%|████████▎ | 433/520 [27:29<05:20, 3.68s/it] {'loss': 9.8, 'grad_norm': 4.4512298104307844e-05, 'learning_rate': 0.215213166783223, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:29<05:20, 3.68s/it] 83%|████████▎ | 434/520 [27:33<05:17, 3.69s/it] {'loss': 9.2833, 'grad_norm': 7.125322505486852e-05, 'learning_rate': 0.21041257529821455, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:33<05:17, 3.69s/it] 84%|████████▎ | 435/520 [27:37<05:12, 3.67s/it] {'loss': 9.9407, 'grad_norm': 3.9556966318113836e-05, 'learning_rate': 0.20566208961958043, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:37<05:12, 3.67s/it] 84%|████████▍ | 436/520 [27:40<05:07, 3.66s/it] {'loss': 9.6781, 'grad_norm': 5.133867070483977e-05, 'learning_rate': 0.20096189432334194, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:40<05:07, 3.66s/it] 84%|████████▍ | 437/520 [27:44<05:03, 3.66s/it] {'loss': 10.124, 'grad_norm': 3.677023122424609e-05, 'learning_rate': 0.1963121720315304, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:44<05:03, 3.66s/it] 84%|████████▍ | 438/520 [27:48<05:00, 3.67s/it] {'loss': 9.293, 'grad_norm': 5.64417601672727e-05, 'learning_rate': 0.191713103405092, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:48<05:00, 3.67s/it] 84%|████████▍ | 439/520 [27:51<04:58, 3.68s/it] {'loss': 9.8207, 'grad_norm': 3.710635993859899e-05, 'learning_rate': 0.18716486713686947, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:51<04:58, 3.68s/it] 85%|████████▍ | 440/520 [27:55<04:55, 3.70s/it] {'loss': 9.7743, 'grad_norm': 4.024202108021742e-05, 'learning_rate': 0.182667639944657, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:55<04:55, 3.70s/it] 85%|████████▍ | 441/520 [27:59<04:50, 3.68s/it] {'loss': 10.2508, 'grad_norm': 3.700608105833542e-05, 'learning_rate': 0.1782215965643364, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:59<04:50, 3.68s/it] 85%|████████▌ | 442/520 [28:03<04:47, 3.68s/it] {'loss': 10.0808, 'grad_norm': 4.585635586844687e-05, 'learning_rate': 0.1738269097430855, 'epoch': 0.85} + 85%|████████▌ | 442/520 [28:03<04:47, 3.68s/it] 85%|████████▌ | 443/520 [28:06<04:43, 3.68s/it] {'loss': 9.6001, 'grad_norm': 3.998578541743363e-05, 'learning_rate': 0.16948375023266743, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:06<04:43, 3.68s/it] 85%|████████▌ | 444/520 [28:10<04:39, 3.68s/it] {'loss': 9.6221, 'grad_norm': 4.262796066849842e-05, 'learning_rate': 0.16519228678279718, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:10<04:39, 3.68s/it] 86%|████████▌ | 445/520 [28:14<04:35, 3.67s/it] {'loss': 9.361, 'grad_norm': 4.77494966011863e-05, 'learning_rate': 0.16095268613458302, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:14<04:35, 3.67s/it] 86%|████████▌ | 
446/520 [28:17<04:30, 3.66s/it] {'loss': 10.1611, 'grad_norm': 3.675637931462266e-05, 'learning_rate': 0.1567651130140486, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:17<04:30, 3.66s/it] 86%|████████▌ | 447/520 [28:21<04:27, 3.66s/it] {'loss': 10.3187, 'grad_norm': 4.67269299496504e-05, 'learning_rate': 0.15262973012573394, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:21<04:27, 3.66s/it] 86%|████████▌ | 448/520 [28:24<04:23, 3.65s/it] {'loss': 9.6523, 'grad_norm': 4.3922509434017205e-05, 'learning_rate': 0.14854669814637145, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:24<04:23, 3.65s/it] 86%|████████▋ | 449/520 [28:28<04:18, 3.64s/it] {'loss': 10.5479, 'grad_norm': 4.262405813912352e-05, 'learning_rate': 0.14451617571864528, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:28<04:18, 3.64s/it] 87%|████████▋ | 450/520 [28:32<04:14, 3.64s/it] {'loss': 10.0102, 'grad_norm': 3.914709474823523e-05, 'learning_rate': 0.1405383194450251, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:32<04:14, 3.64s/it] 87%|████████▋ | 451/520 [28:35<04:12, 3.66s/it] {'loss': 10.1935, 'grad_norm': 4.3648637779433886e-05, 'learning_rate': 0.1366132838816836, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:35<04:12, 3.66s/it] 87%|████████▋ | 452/520 [28:39<04:08, 3.65s/it] {'loss': 10.1226, 'grad_norm': 3.713461314915197e-05, 'learning_rate': 0.1327412215324903, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:39<04:08, 3.65s/it] 87%|████████▋ | 453/520 [28:43<04:04, 3.65s/it] {'loss': 10.5123, 'grad_norm': 4.266284403737483e-05, 'learning_rate': 0.1289222828430855, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:43<04:04, 3.65s/it] 87%|████████▋ | 454/520 [28:46<04:01, 3.66s/it] {'loss': 9.7163, 'grad_norm': 4.359363692765784e-05, 'learning_rate': 0.1251566161950357, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:46<04:01, 3.66s/it] 88%|████████▊ | 455/520 [28:50<03:57, 3.66s/it] {'loss': 9.7177, 'grad_norm': 3.8425834572455596e-05, 'learning_rate': 0.12144436790006902, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:50<03:57, 3.66s/it] 88%|████████▊ | 456/520 [28:54<03:54, 3.67s/it] {'loss': 9.5275, 'grad_norm': 4.217858165189914e-05, 'learning_rate': 0.1177856821943884, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:54<03:54, 3.67s/it] 88%|████████▊ | 457/520 [28:57<03:50, 3.66s/it] {'loss': 10.9791, 'grad_norm': 5.446641279777491e-05, 'learning_rate': 0.11418070123306989, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:57<03:50, 3.66s/it] 88%|████████▊ | 458/520 [29:01<03:47, 3.66s/it] {'loss': 10.1764, 'grad_norm': 3.991714709235043e-05, 'learning_rate': 0.11062956508453703, 'epoch': 0.88} + 88%|████████▊ | 458/520 [29:01<03:47, 3.66s/it] 88%|████████▊ | 459/520 [29:05<03:43, 3.66s/it] {'loss': 9.9084, 'grad_norm': 3.886212028475431e-05, 'learning_rate': 0.10713241172511967, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:05<03:43, 3.66s/it] 88%|████████▊ | 460/520 [29:08<03:40, 3.67s/it] {'loss': 9.7311, 'grad_norm': 4.723643796445006e-05, 'learning_rate': 0.1036893770336938, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:08<03:40, 3.67s/it] 89%|████████▊ | 461/520 [29:12<03:37, 3.69s/it] {'loss': 10.6028, 'grad_norm': 4.3242385030322265e-05, 'learning_rate': 0.10030059478640024, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:12<03:37, 3.69s/it] 89%|████████▉ | 462/520 [29:16<03:33, 3.69s/it] {'loss': 10.5392, 'grad_norm': 4.1634044714783195e-05, 'learning_rate': 0.09696619665144901, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:16<03:33, 3.69s/it] 89%|████████▉ | 463/520 [29:19<03:29, 3.67s/it] {'loss': 10.065, 'grad_norm': 
4.969452342021563e-05, 'learning_rate': 0.09368631218400136, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:19<03:29, 3.67s/it] 89%|████████▉ | 464/520 [29:23<03:26, 3.68s/it] {'loss': 10.2337, 'grad_norm': 3.9082445417003414e-05, 'learning_rate': 0.09046106882113752, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:23<03:26, 3.68s/it] 89%|████████▉ | 465/520 [29:27<03:22, 3.69s/it] {'loss': 10.276, 'grad_norm': 3.8236547358373623e-05, 'learning_rate': 0.0872905918769048, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:27<03:22, 3.69s/it] 90%|████████▉ | 466/520 [29:31<03:19, 3.70s/it] {'loss': 9.6068, 'grad_norm': 3.7718442138513254e-05, 'learning_rate': 0.08417500453744864, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:31<03:19, 3.70s/it] 90%|████████▉ | 467/520 [29:34<03:16, 3.70s/it] {'loss': 10.3944, 'grad_norm': 4.041197000340724e-05, 'learning_rate': 0.08111442785622597, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:34<03:16, 3.70s/it] 90%|█████████ | 468/520 [29:38<03:11, 3.69s/it] {'loss': 10.3605, 'grad_norm': 4.8451992368137474e-05, 'learning_rate': 0.07810898074930245, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:38<03:11, 3.69s/it] 90%|█████████ | 469/520 [29:42<03:07, 3.68s/it] {'loss': 10.1458, 'grad_norm': 3.9008550335697054e-05, 'learning_rate': 0.075158779990731, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:42<03:07, 3.68s/it] 90%|█████████ | 470/520 [29:45<03:07, 3.74s/it] {'loss': 9.7317, 'grad_norm': 4.272223606466184e-05, 'learning_rate': 0.07226394020801646, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:45<03:07, 3.74s/it] 91%|█████████ | 471/520 [29:49<03:04, 3.77s/it] {'loss': 10.6864, 'grad_norm': 5.127515200595866e-05, 'learning_rate': 0.06942457387765977, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:49<03:04, 3.77s/it] 91%|█████████ | 472/520 [29:53<03:02, 3.81s/it] {'loss': 9.8806, 'grad_norm': 4.554747202780826e-05, 'learning_rate': 0.06664079132078882, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:53<03:02, 3.81s/it] 91%|█████████ | 473/520 [29:57<02:59, 3.82s/it] {'loss': 9.9687, 'grad_norm': 4.0726278623498015e-05, 'learning_rate': 0.06391270069887289, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:57<02:59, 3.82s/it] 91%|█████████ | 474/520 [30:01<02:55, 3.82s/it] {'loss': 10.7263, 'grad_norm': 4.432519882640113e-05, 'learning_rate': 0.061240408009518355, 'epoch': 0.91} + 91%|█████████ | 474/520 [30:01<02:55, 3.82s/it] 91%|█████████▏| 475/520 [30:05<02:50, 3.79s/it] {'loss': 9.9224, 'grad_norm': 3.500249613947308e-05, 'learning_rate': 0.058624017082350766, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:05<02:50, 3.79s/it] 92%|█████████▏| 476/520 [30:08<02:45, 3.77s/it] {'loss': 9.9865, 'grad_norm': 3.694087069789144e-05, 'learning_rate': 0.056063629574981955, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:08<02:45, 3.77s/it] 92%|█████████▏| 477/520 [30:12<02:40, 3.73s/it] {'loss': 9.7916, 'grad_norm': 3.536662442394117e-05, 'learning_rate': 0.05355934496905851, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:12<02:40, 3.73s/it] 92%|█████████▏| 478/520 [30:16<02:36, 3.72s/it] {'loss': 9.6613, 'grad_norm': 3.7634263125200264e-05, 'learning_rate': 0.0511112605663977, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:16<02:36, 3.72s/it] 92%|█████████▏| 479/520 [30:19<02:31, 3.70s/it] {'loss': 10.5692, 'grad_norm': 4.005188157084406e-05, 'learning_rate': 0.048719471485205834, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:19<02:31, 3.70s/it] 92%|█████████▏| 480/520 [30:23<02:28, 3.71s/it] {'loss': 10.6467, 'grad_norm': 4.270669559206087e-05, 'learning_rate': 0.046384070656383225, 'epoch': 0.92} 
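Each optimizer step above is logged as a Python dict literal of the form {'loss': ..., 'grad_norm': ..., 'learning_rate': ..., 'epoch': ...}. The 520-step total is consistent with the recorded launch arguments: --train_data_ratio 0.1 of the 665k-sample mix leaves roughly 66.5k samples, and a global batch of 4 per-device x 4 accumulation steps x 8 ranks = 128 gives 66.5k / 128 ≈ 520 steps. A minimal sketch for recovering the loss and learning-rate series from a captured log like this one (hypothetical helper, not part of the TinyLLaVA code; the path is a placeholder):

    import ast
    import re

    # Match each per-step metrics dict; anchoring on 'loss' deliberately
    # excludes the final summary dict, which starts with 'train_runtime'.
    STEP_RE = re.compile(r"\{'loss':[^}]*\}")

    def parse_metrics(log_text: str) -> list[dict]:
        """Return the logged per-step dicts in order of appearance."""
        return [ast.literal_eval(m.group(0)) for m in STEP_RE.finditer(log_text)]

    with open("path/to/experiment.log") as f:  # placeholder path
        steps = parse_metrics(f.read())
    losses = [s["loss"] for s in steps]
    lrs = [s["learning_rate"] for s in steps]

ast.literal_eval is used rather than eval so that only literal dicts are accepted from the log text.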
+ 92%|█████████▏| 480/520 [30:23<02:28, 3.71s/it] 92%|█████████▎| 481/520 [30:27<02:24, 3.71s/it] {'loss': 10.2874, 'grad_norm': 3.4979264832216376e-05, 'learning_rate': 0.044105148819913564, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:27<02:24, 3.71s/it] 93%|█████████▎| 482/520 [30:30<02:20, 3.69s/it] {'loss': 10.612, 'grad_norm': 4.0813053434680126e-05, 'learning_rate': 0.04188279452133825, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:30<02:20, 3.69s/it] 93%|█████████▎| 483/520 [30:34<02:16, 3.69s/it] {'loss': 9.8942, 'grad_norm': 3.3369020906240476e-05, 'learning_rate': 0.039717094108314976, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:34<02:16, 3.69s/it] 93%|█████████▎| 484/520 [30:38<02:12, 3.68s/it] {'loss': 10.1781, 'grad_norm': 3.7517692121157167e-05, 'learning_rate': 0.03760813172726457, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:38<02:12, 3.68s/it] 93%|█████████▎| 485/520 [30:41<02:08, 3.68s/it] {'loss': 9.6709, 'grad_norm': 3.6099816636512497e-05, 'learning_rate': 0.03555598932009996, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:41<02:08, 3.68s/it] 93%|█████████▎| 486/520 [30:45<02:05, 3.70s/it] {'loss': 9.8228, 'grad_norm': 3.319243575988787e-05, 'learning_rate': 0.03356074662104319, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:45<02:05, 3.70s/it] 94%|█████████▎| 487/520 [30:49<02:01, 3.69s/it] {'loss': 9.4798, 'grad_norm': 3.852066567530035e-05, 'learning_rate': 0.03162248115352745, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:49<02:01, 3.69s/it] 94%|█████████▍| 488/520 [30:53<01:57, 3.68s/it] {'loss': 9.8525, 'grad_norm': 3.872018669845185e-05, 'learning_rate': 0.02974126822718426, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:53<01:57, 3.68s/it] 94%|█████████▍| 489/520 [30:56<01:54, 3.69s/it] {'loss': 10.0672, 'grad_norm': 3.2377105103280346e-05, 'learning_rate': 0.027917180934918517, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:56<01:54, 3.69s/it] 94%|█████████▍| 490/520 [31:00<01:50, 3.68s/it] {'loss': 9.7506, 'grad_norm': 3.4725462323394354e-05, 'learning_rate': 0.02615029015006759, 'epoch': 0.94} + 94%|█████████▍| 490/520 [31:00<01:50, 3.68s/it] 94%|█████████▍| 491/520 [31:04<01:47, 3.71s/it] {'loss': 9.8377, 'grad_norm': 3.6697101531697246e-05, 'learning_rate': 0.024440664523648015, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:04<01:47, 3.71s/it] 95%|█████████▍| 492/520 [31:07<01:43, 3.69s/it] {'loss': 9.908, 'grad_norm': 3.301924093330755e-05, 'learning_rate': 0.02278837048168797, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:07<01:43, 3.69s/it] 95%|█████████▍| 493/520 [31:11<01:39, 3.69s/it] {'loss': 10.8338, 'grad_norm': 4.8672885704575956e-05, 'learning_rate': 0.02119347222264617, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:11<01:39, 3.69s/it] 95%|█████████▌| 494/520 [31:15<01:35, 3.69s/it] {'loss': 9.8558, 'grad_norm': 3.310191346846777e-05, 'learning_rate': 0.019656031714918365, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:15<01:35, 3.69s/it] 95%|█████████▌| 495/520 [31:18<01:32, 3.69s/it] {'loss': 9.2789, 'grad_norm': 4.113403520751092e-05, 'learning_rate': 0.018176108694427928, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:18<01:32, 3.69s/it] 95%|█████████▌| 496/520 [31:22<01:28, 3.69s/it] {'loss': 9.665, 'grad_norm': 4.0291701070077626e-05, 'learning_rate': 0.016753760662307216, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:22<01:28, 3.69s/it] 96%|█████████▌| 497/520 [31:26<01:24, 3.69s/it] {'loss': 10.0185, 'grad_norm': 3.275485538182524e-05, 'learning_rate': 0.01538904288266102, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:26<01:24, 3.69s/it] 96%|█████████▌| 
498/520 [31:29<01:20, 3.68s/it] {'loss': 9.5029, 'grad_norm': 3.760634773165206e-05, 'learning_rate': 0.014082008380420785, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:29<01:20, 3.68s/it] 96%|█████████▌| 499/520 [31:33<01:17, 3.69s/it] {'loss': 10.6698, 'grad_norm': 4.264141015481435e-05, 'learning_rate': 0.012832707939284427, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:33<01:17, 3.69s/it] 96%|█████████▌| 500/520 [31:37<01:13, 3.69s/it] {'loss': 10.2263, 'grad_norm': 3.8770568042858136e-05, 'learning_rate': 0.011641190099741905, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:37<01:13, 3.69s/it] 96%|█████████▋| 501/520 [31:40<01:09, 3.68s/it] {'loss': 10.6094, 'grad_norm': 4.1051032925884096e-05, 'learning_rate': 0.010507501157190569, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:40<01:09, 3.68s/it] 97%|█████████▋| 502/520 [31:44<01:06, 3.68s/it] {'loss': 9.7157, 'grad_norm': 3.4184607806653915e-05, 'learning_rate': 0.009431685160136094, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:44<01:06, 3.68s/it] 97%|█████████▋| 503/520 [31:48<01:02, 3.69s/it] {'loss': 10.3085, 'grad_norm': 3.5980180832933234e-05, 'learning_rate': 0.008413783908480355, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:48<01:02, 3.69s/it] 97%|█████████▋| 504/520 [31:52<00:59, 3.69s/it] {'loss': 10.4587, 'grad_norm': 4.578475503411892e-05, 'learning_rate': 0.007453836951897885, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:52<00:59, 3.69s/it] 97%|█████████▋| 505/520 [31:55<00:55, 3.70s/it] {'loss': 9.925, 'grad_norm': 3.496721916492302e-05, 'learning_rate': 0.00655188158829928, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:55<00:55, 3.70s/it] 97%|█████████▋| 506/520 [31:59<00:51, 3.68s/it] {'loss': 9.5366, 'grad_norm': 3.85036175614883e-05, 'learning_rate': 0.005707952862381682, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:59<00:51, 3.68s/it] 98%|█████████▊| 507/520 [32:03<00:48, 3.69s/it] {'loss': 10.9023, 'grad_norm': 4.837885702044265e-05, 'learning_rate': 0.004922083564267377, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:03<00:48, 3.69s/it] 98%|█████████▊| 508/520 [32:06<00:44, 3.69s/it] {'loss': 10.0149, 'grad_norm': 3.427847173213252e-05, 'learning_rate': 0.004194304228229806, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:06<00:44, 3.69s/it] 98%|█████████▊| 509/520 [32:10<00:40, 3.70s/it] {'loss': 9.5456, 'grad_norm': 3.626433217388168e-05, 'learning_rate': 0.0035246431315066884, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:10<00:40, 3.70s/it] 98%|█████████▊| 510/520 [32:14<00:36, 3.68s/it] {'loss': 9.7838, 'grad_norm': 3.6402942356072395e-05, 'learning_rate': 0.0029131262932022284, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:14<00:36, 3.68s/it] 98%|█████████▊| 511/520 [32:17<00:33, 3.68s/it] {'loss': 9.7565, 'grad_norm': 3.5488597816521685e-05, 'learning_rate': 0.002359777473275093, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:17<00:33, 3.68s/it] 98%|█████████▊| 512/520 [32:21<00:29, 3.71s/it] {'loss': 9.3839, 'grad_norm': 4.089308335856276e-05, 'learning_rate': 0.0018646181716164834, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:21<00:29, 3.71s/it] 99%|█████████▊| 513/520 [32:25<00:25, 3.71s/it] {'loss': 9.9176, 'grad_norm': 3.5517941040053586e-05, 'learning_rate': 0.0014276676272133026, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:25<00:25, 3.71s/it] 99%|█████████▉| 514/520 [32:28<00:22, 3.70s/it] {'loss': 9.8757, 'grad_norm': 3.401699054187345e-05, 'learning_rate': 0.0010489428174020876, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:29<00:22, 3.70s/it] 99%|█████████▉| 515/520 [32:32<00:18, 3.69s/it] {'loss': 10.1558, 
'grad_norm': 3.5851751040874544e-05, 'learning_rate': 0.0007284584572085362, 'epoch': 0.99}
+ 99%|█████████▉| 515/520 [32:32<00:18, 3.69s/it] 99%|█████████▉| 516/520 [32:36<00:14, 3.69s/it] {'loss': 9.9681, 'grad_norm': 3.918686982231399e-05, 'learning_rate': 0.0004662269987756318, 'epoch': 0.99}
+ 99%|█████████▉| 516/520 [32:36<00:14, 3.69s/it] 99%|█████████▉| 517/520 [32:39<00:11, 3.67s/it] {'loss': 10.5411, 'grad_norm': 3.9267516844272636e-05, 'learning_rate': 0.00026225863088036316, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:39<00:11, 3.67s/it] 100%|█████████▉| 518/520 [32:43<00:07, 3.65s/it] {'loss': 9.8877, 'grad_norm': 3.7149801174759727e-05, 'learning_rate': 0.00011656127853770792, 'epoch': 1.0}
+ 100%|█████████▉| 518/520 [32:43<00:07, 3.65s/it] 100%|█████████▉| 519/520 [32:47<00:03, 3.65s/it] {'loss': 10.4273, 'grad_norm': 3.97619644810764e-05, 'learning_rate': 2.9140602692712125e-05, 'epoch': 1.0}
+ 100%|█████████▉| 519/520 [32:47<00:03, 3.65s/it] 100%|██████████| 520/520 [32:51<00:00, 3.91s/it] {'loss': 10.4112, 'grad_norm': 4.043307652843732e-05, 'learning_rate': 0.0, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:51<00:00, 3.91s/it] {'train_runtime': 1971.7894, 'train_samples_per_second': 33.74, 'train_steps_per_second': 0.264, 'train_loss': 10.074092732484525, 'epoch': 1.0}
+ 100%|██████████| 520/520 [32:51<00:00, 3.91s/it] 100%|██████████| 520/520 [32:51<00:00, 3.79s/it]
+[2025-10-09 05:48:50,533] [INFO] [launch.py:348:main] Process 809229 exits successfully.
+[2025-10-09 05:48:50,534] [INFO] [launch.py:348:main] Process 809228 exits successfully.
+[2025-10-09 05:48:50,534] [INFO] [launch.py:348:main] Process 809230 exits successfully.
+[2025-10-09 05:48:50,535] [INFO] [launch.py:348:main] Process 809227 exits successfully.
+[2025-10-09 05:48:50,535] [INFO] [launch.py:348:main] Process 809231 exits successfully.
+[2025-10-09 05:48:51,536] [INFO] [launch.py:348:main] Process 809226 exits successfully.
+[2025-10-09 05:48:51,537] [INFO] [launch.py:348:main] Process 809232 exits successfully.
+[2025-10-09 05:48:54,540] [INFO] [launch.py:348:main] Process 809225 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3_connector-3.0_0.5_3_ablation_20251009_051327.log
+Timestamp: 2025-10-09 05:48:57
+=====================================
diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251009_081157.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251009_081157.log
new file mode 100644
index 0000000000000000000000000000000000000000..9eba6e42d7f57c190699cef8520fa55999a6c77e
--- /dev/null
+++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251009_081157.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251009_081157.log
+Timestamp: 2025-10-09 08:11:57
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead.
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-09 08:12:00,286] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:03,309] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 08:12:03,310] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 3e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 3e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 08:12:05,955] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:06,981] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 08:12:06,981] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 08:12:06,982] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 08:12:06,982] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 08:12:06,982] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 08:12:06,982] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 08:12:06,982] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 08:12:06,984] [INFO] [launch.py:253:main] process 920731 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 08:12:06,986] [INFO] [launch.py:253:main] process 920732 spawned with 
command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 08:12:06,988] [INFO] [launch.py:253:main] process 920733 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 08:12:06,990] [INFO] [launch.py:253:main] process 920734 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', 
'--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 08:12:06,992] [INFO] [launch.py:253:main] process 920735 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 08:12:06,994] [INFO] [launch.py:253:main] process 920736 spawned with command: 
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 08:12:06,996] [INFO] [launch.py:253:main] process 920737 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', 
'--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 08:12:06,998] [INFO] [launch.py:253:main] process 920738 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', 
'--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 08:12:13,578] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:13,805] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:13,840] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:13,840] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:13,868] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:13,875] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:13,877] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:13,901] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 08:12:13,996] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 08:12:14,220] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 08:12:14,249] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 08:12:14,254] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 08:12:14,280] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 08:12:14,282] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 08:12:14,282] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-09 08:12:14,288] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 08:12:14,307] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. 
It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:920731:920731 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920731:920731 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920731:920731 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:920731:920731 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:920731:920731 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:920731:920731 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +ywang29-vrdb-test1-worker-0:920732:920732 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:920732:920732 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920732:920732 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920732:920732 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:920732:920732 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:920732:920732 [1] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:920736:920736 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:920736:920736 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920736:920736 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920734:920734 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:920734:920734 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920734:920734 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920736:920736 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:920736:920736 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:920736:920736 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:920734:920734 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:920734:920734 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:920734:920734 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO NET/IB : No device found. 
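The TinyLlavaConfig dump above records this ablation's mask settings: soft masks with temperature 0.5 for both the LLM (temperature_attn/temperature_mlp) and the connector, scores initialized at init_mean 3.0, and subnet_type 'global' for the connector. A minimal sketch of one way such a temperature-scaled soft mask can be realized (my reading of the config fields, not the repo's actual implementation):

    import torch

    def soft_mask(scores: torch.Tensor, temperature: float = 0.5) -> torch.Tensor:
        # Sigmoid over learnable mask scores; a lower temperature sharpens
        # the mask toward hard 0/1 values while keeping it differentiable.
        return torch.sigmoid(scores / temperature)

    # With scores initialized at init_mean 3.0 (as in this run), the initial
    # mask value is sigmoid(3.0 / 0.5) = sigmoid(6.0) ~ 0.998, i.e. nearly
    # all weights pass through at the start of mask tuning.
    scores = torch.full((4, 4), 3.0, requires_grad=True)
    masked_weight = soft_mask(scores) * torch.randn(4, 4)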
+ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:920733:920733 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:920733:920733 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920733:920733 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920733:920733 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:920733:920733 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:920733:920733 [2] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
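The Flash Attention 2.0 notice above appears because the checkpoint is materialized on CPU first (here by DeepSpeed ZeRO-3, which shards and places parameters itself, so the notice is benign in this run). For a plain single-GPU load, a minimal sketch of the pattern the message recommends, using the model name from the launch command:

```python
import torch
from transformers import AutoModelForCausalLM

# FlashAttention-2 kernels need fp16/bf16 weights and a CUDA device.
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
model.to("cuda")  # move to GPU after CPU init, as the warning says
```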
+ywang29-vrdb-test1-worker-0:920737:920737 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:920737:920737 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920737:920737 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920737:920737 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:920737:920737 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:920737:920737 [6] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:920735:920735 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:920735:920735 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920735:920735 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920735:920735 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:920735:920735 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:920735:920735 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:920738:920738 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:920738:920738 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920738:920738 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920738:920738 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:920738:920738 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:920738:920738 [7] NCCL INFO NET/Plugin: Using internal network plugin. 
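The NCCL bootstrap lines above record the transport selection on this node: NCCL_SOCKET_IFNAME is pinned to the eth* interfaces, no InfiniBand device or external net/tuner plugin is found, and NCCL falls back to its internal TCP-socket transport over eth0. A sketch of the environment knobs that produce exactly this logging, set before the process group is created (rank/world-size variables come from the deepspeed launcher):

```python
import os

os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")  # restrict NCCL to eth* NICs
os.environ.setdefault("NCCL_DEBUG", "INFO")         # emit the NCCL INFO lines above

import torch.distributed as dist
dist.init_process_group(backend="nccl")  # RANK/WORLD_SIZE/MASTER_ADDR set by the launcher
```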
+ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO ncclCommInitRank comm 0x56189b7af540 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x1f4ce5ea1bc02c9b - Init START +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO ncclCommInitRank comm 0x5580b052a7e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x1f4ce5ea1bc02c9b - Init START +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO ncclCommInitRank comm 0x5597d902f200 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x1f4ce5ea1bc02c9b - Init START +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO ncclCommInitRank comm 0x56117fc76120 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x1f4ce5ea1bc02c9b - Init START +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO ncclCommInitRank comm 0x562e047076f0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x1f4ce5ea1bc02c9b - Init START +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO ncclCommInitRank comm 0x559b629c1cb0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x1f4ce5ea1bc02c9b - Init START +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO ncclCommInitRank comm 0x55f317cf3d00 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x1f4ce5ea1bc02c9b - Init START +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO ncclCommInitRank comm 0x5573db481590 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x1f4ce5ea1bc02c9b - Init START +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 
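The ncclCommInitRank ... Init START lines mark the lazy creation of the 8-rank communicator: each of the eight local processes binds one GPU (cudaDev/nvmlDev 0-7) and joins the same commId. A minimal sketch of the sequence that triggers this under a torchrun/deepspeed-style launcher:

```python
import os
import torch
import torch.distributed as dist

dist.init_process_group(backend="nccl")   # one process per GPU
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)         # pin this rank to its GPU

# The first collective builds the NCCL communicator
# (the Init START ... Init COMPLETE lines in this log).
x = torch.ones(1, device="cuda")
dist.all_reduce(x)                        # x == world_size on every rank
```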
+ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO comm 0x55f317cf3d00 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO comm 0x562e047076f0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO comm 0x56117fc76120 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO comm 0x5597d902f200 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO comm 0x5580b052a7e0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO comm 0x559b629c1cb0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO comm 0x5573db481590 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO comm 0x56189b7af540 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 05/24 : 0 1 2 3 
4 5 6 7 +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 
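The Channel NN/24 lines enumerate 24 communication channels, each a ring over the eight local GPUs in the order 0 1 2 3 4 5 6 7, while the Trees lines encode the matching chain per rank (e.g. 7/-1/-1->6->5 says rank 6's child is 7 and parent is 5). For a single-node ring like this, the neighbor pattern is simply rank ± 1 modulo world size, as this small illustration shows:

```python
world_size = 8
for rank in range(world_size):
    send_to = (rank + 1) % world_size    # ring successor (7 wraps to 0)
    recv_from = (rank - 1) % world_size  # ring predecessor
    print(f"rank {rank}: recv from {recv_from}, send to {send_to}")
```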
+ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
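The "via P2P/CUMEM/read" routes above mean each neighboring GPU pair communicates over direct CUDA peer-to-peer mappings instead of staging through host memory. Whether a given device pair supports that can be checked with a stock PyTorch query, e.g.:

```python
import torch

n = torch.cuda.device_count()
for i in range(n):
    peers = [j for j in range(n)
             if j != i and torch.cuda.can_device_access_peer(i, j)]
    print(f"cuda:{i} peer-access OK to: {peers}")
```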
+ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920735:922347 
[4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:920738:922348 [7] NCCL INFO ncclCommInitRank comm 0x56189b7af540 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x1f4ce5ea1bc02c9b - Init COMPLETE +ywang29-vrdb-test1-worker-0:920735:922347 [4] NCCL INFO ncclCommInitRank comm 0x5597d902f200 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x1f4ce5ea1bc02c9b - Init COMPLETE +ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:920736:922343 [5] NCCL INFO ncclCommInitRank comm 0x562e047076f0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x1f4ce5ea1bc02c9b - Init COMPLETE +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:920732:922342 [1] NCCL INFO ncclCommInitRank comm 0x559b629c1cb0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x1f4ce5ea1bc02c9b - Init COMPLETE +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
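(The TUNER/Plugin errors above are benign: NCCL failed to load the optional external tuner plugin, libnccl-tuner.so, because its libnccl-net.so dependency is absent, and fell back to its built-in tuner, as the "Using internal tuner plugin" lines confirm.)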
+ywang29-vrdb-test1-worker-0:920737:922346 [6] NCCL INFO ncclCommInitRank comm 0x55f317cf3d00 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x1f4ce5ea1bc02c9b - Init COMPLETE +ywang29-vrdb-test1-worker-0:920734:922344 [3] NCCL INFO ncclCommInitRank comm 0x56117fc76120 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x1f4ce5ea1bc02c9b - Init COMPLETE +ywang29-vrdb-test1-worker-0:920733:922345 [2] NCCL INFO ncclCommInitRank comm 0x5580b052a7e0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x1f4ce5ea1bc02c9b - Init COMPLETE +ywang29-vrdb-test1-worker-0:920731:922341 [0] NCCL INFO ncclCommInitRank comm 0x5573db481590 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x1f4ce5ea1bc02c9b - Init COMPLETE +[2025-10-09 08:12:56,567] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
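The `*.scores` tensors flagged above sit alongside every attention and MLP projection weight; they are not part of the pretrained Qwen2.5 checkpoint, so Transformers initializes them from scratch and emits this warning once per rank. Parameters named `scores` like this are typically the learnable logits of a weight mask; below is a minimal sketch of a soft-masked projection under that assumption (the class name, the sigmoid relaxation, and the default values are illustrative, not the repository's actual code):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    """Linear projection gated by learnable per-weight mask scores (illustrative sketch)."""

    def __init__(self, in_features: int, out_features: int,
                 init_mean: float = 1.0, temperature: float = 0.3):
        super().__init__()
        self.weight = nn.Parameter(torch.empty(out_features, in_features))
        nn.init.kaiming_uniform_(self.weight)
        # 'scores' is the tensor the warning reports as newly initialized:
        # it exists only in the fine-tuning model, never in the checkpoint.
        self.scores = nn.Parameter(torch.full((out_features, in_features), init_mean))
        self.temperature = temperature

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Soft mask: sigmoid(scores / T) gates every weight continuously in (0, 1).
        # A small temperature pushes each gate toward a near-binary keep/drop
        # decision while keeping the masking differentiable end to end.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask)
```

Under this reading, the warning is expected rather than a sign of a broken checkpoint: the pretrained projection weights load normally, and only the freshly created mask logits start from their initializer.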
'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-09 08:19:38,093] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
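The SupermaskLinearSparsity_SoftForward_Normal modules in the dump above are the masked projections this ablation trains: the underlying weights stay frozen and only a per-weight `scores` tensor is learned (the launch command uses mask_type soft, backward_type normal, and temperature 0.3, and the init lines below report the scores starting near 3.0). The class below is a minimal hypothetical sketch of what such a layer plausibly computes, assuming the temperature divides the scores inside a sigmoid; it is not the repository's implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    """Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal:
    frozen weights plus a learnable per-weight `scores` tensor whose soft
    sigmoid mask gates the weights in the forward pass."""

    def __init__(self, in_features, out_features, bias=True, temperature=0.3):
        super().__init__()
        # Frozen base weights; only the mask scores receive gradients.
        self.weight = nn.Parameter(torch.empty(out_features, in_features),
                                   requires_grad=False)
        nn.init.kaiming_uniform_(self.weight, a=5 ** 0.5)
        self.bias = (nn.Parameter(torch.zeros(out_features), requires_grad=False)
                     if bias else None)
        # One score per weight entry; these are the `.scores` tensors in the log.
        # 3.0 matches the "Pre-training init ... Mean=3.000000" lines below.
        self.scores = nn.Parameter(torch.full((out_features, in_features), 3.0))
        self.temperature = temperature

    def forward(self, x):
        # "soft" forward: a smooth sigmoid mask; "normal" backward would then
        # just be ordinary autograd through the sigmoid.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

Under that assumption the initial mask is sigmoid(3.0 / 0.3) ≈ 1.0, i.e. training starts from an essentially dense network, and an 896×896 q_proj carries 802816 scores, matching the trainable-parameter counts listed further down.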
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init
language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-09 08:19:51,230 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-09 08:19:51,236 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores:
114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO P2P Chunksize set to 524288
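The per-tensor counts in the trainable-parameter listing above follow directly from the projection shapes in the module dump (hidden size 896, KV projection width 128, MLP width 4864, SigLIP width 1152), and they sum to the reported trainable total. A quick arithmetic check in Python:

```python
# Sanity-check the trainable-score counts reported in the log above.
hidden, kv_dim, inter = 896, 128, 4864
attn = 2 * hidden * hidden + 2 * hidden * kv_dim  # q/o: 896*896=802816 each; k/v: 896*128=114688 each
mlp = 3 * hidden * inter                          # gate/up/down: 896*4864=4358144 each
per_layer = attn + mlp                            # 14,909,440 scores per decoder layer
connector = 1152 * 896 + 896 * 896                # 1,032,192 + 802,816
total = 24 * per_layer + connector
print(per_layer, connector, total)                # -> 14909440 1835008 359661568
```

The printed total matches the logged "Total Trainable Parameters: 359661568", confirming that only the mask scores of the LLM and connector are being trained in this llm-connector ablation.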
+ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 
1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+[NCCL INFO: several hundred per-channel connection lines elided. Ranks 0-7 on ywang29-vrdb-test1-worker-0 connect channels 00/0-23/0 to their ring neighbors (0->1->...->7->0) via P2P/CUMEM/read, report "Connected all rings", open the reverse-direction channels (7->6, ..., 1->0), then report "Connected all trees" with per-rank "threadThresholds 8/8/64 | 64/8/64 | 512 | 512" and "24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer" summaries.] 
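The NCCL INFO lines summarized above only appear when NCCL debug logging is switched on. As a minimal sketch, not taken from this repo's scripts: the standard `NCCL_DEBUG` / `NCCL_DEBUG_SUBSYS` environment variables control this output, and a launch like the one in this log could enable (or silence) it as follows; the flag list here is trimmed to the launcher essentials.

```python
import os
import subprocess

# Sketch only: reproduce or suppress the NCCL INFO lines above via the
# standard NCCL debug env vars (assumed defaults, not read from this repo).
env = dict(os.environ)
env["NCCL_DEBUG"] = "INFO"             # "WARN" would silence the channel maps
env["NCCL_DEBUG_SUBSYS"] = "INIT,P2P"  # optional: limit output to init/P2P phases

subprocess.run(
    ["deepspeed", "--master_port", "29501",
     "tinyllava/train/train.py", "--deepspeed", "./scripts/zero3.json"],
    # ... remaining training flags from the logged cmd line omitted ...
    env=env,
    check=True,  # surface launcher failures instead of continuing silently
)
```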
+ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:920737:927704 [6] NCCL INFO ncclCommInitRank comm 0x7f0e0006b130 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x891a61eba4a42f1a - Init COMPLETE +ywang29-vrdb-test1-worker-0:920735:927703 [4] NCCL INFO ncclCommInitRank comm 0x7f8d2806aba0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x891a61eba4a42f1a - Init COMPLETE +ywang29-vrdb-test1-worker-0:920731:927699 [0] NCCL INFO ncclCommInitRank comm 0x7ff1d006ac80 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x891a61eba4a42f1a - Init COMPLETE +ywang29-vrdb-test1-worker-0:920733:927702 [2] NCCL INFO ncclCommInitRank comm 0x7f516006ab50 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x891a61eba4a42f1a - Init COMPLETE +ywang29-vrdb-test1-worker-0:920732:927705 [1] NCCL INFO ncclCommInitRank comm 0x7f81b806aa20 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x891a61eba4a42f1a - Init COMPLETE +ywang29-vrdb-test1-worker-0:920736:927701 [5] NCCL INFO ncclCommInitRank comm 0x7f36fc06abe0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x891a61eba4a42f1a - Init COMPLETE +ywang29-vrdb-test1-worker-0:920738:927706 [7] NCCL INFO ncclCommInitRank comm 0x7f284006a940 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x891a61eba4a42f1a - Init COMPLETE +ywang29-vrdb-test1-worker-0:920734:927700 [3] NCCL INFO ncclCommInitRank comm 0x7f10c806acd0 rank 3 
nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x891a61eba4a42f1a - Init COMPLETE + 0%| | 1/520 [00:12<1:51:40, 12.91s/it] {'loss': 2.0453, 'grad_norm': 0.004835900355770686, 'learning_rate': 0.01875, 'epoch': 0.0} + 0%| | 1/520 [00:12<1:51:40, 12.91s/it] 0%| | 2/520 [00:16<1:04:50, 7.51s/it] {'loss': 2.0549, 'grad_norm': 0.00524845852004904, 'learning_rate': 0.0375, 'epoch': 0.0} + 0%| | 2/520 [00:16<1:04:50, 7.51s/it] 1%| | 3/520 [00:20<49:51, 5.79s/it] {'loss': 2.1899, 'grad_norm': 0.006006523607845096, 'learning_rate': 0.056249999999999994, 'epoch': 0.01} + 1%| | 3/520 [00:20<49:51, 5.79s/it] 1%| | 4/520 [00:24<42:29, 4.94s/it] {'loss': 2.0656, 'grad_norm': 0.004963310346059799, 'learning_rate': 0.075, 'epoch': 0.01} + 1%| | 4/520 [00:24<42:29, 4.94s/it] 1%| | 5/520 [00:27<38:21, 4.47s/it] {'loss': 1.7895, 'grad_norm': 0.002770854802498767, 'learning_rate': 0.09375, 'epoch': 0.01} + 1%| | 5/520 [00:27<38:21, 4.47s/it] 1%| | 6/520 [00:31<35:58, 4.20s/it] {'loss': 1.4321, 'grad_norm': 0.0006533890699553689, 'learning_rate': 0.11249999999999999, 'epoch': 0.01} + 1%| | 6/520 [00:31<35:58, 4.20s/it] 1%|▏ | 7/520 [00:34<34:22, 4.02s/it] {'loss': 1.5226, 'grad_norm': 0.000779131018497036, 'learning_rate': 0.13125, 'epoch': 0.01} + 1%|▏ | 7/520 [00:34<34:22, 4.02s/it] 2%|▏ | 8/520 [00:39<35:02, 4.11s/it] {'loss': 1.5407, 'grad_norm': 0.0005916019997463595, 'learning_rate': 0.15, 'epoch': 0.02} + 2%|▏ | 8/520 [00:39<35:02, 4.11s/it] 2%|▏ | 9/520 [00:43<35:08, 4.13s/it] {'loss': 1.5879, 'grad_norm': 0.0005226546821564885, 'learning_rate': 0.16874999999999998, 'epoch': 0.02} + 2%|▏ | 9/520 [00:43<35:08, 4.13s/it] 2%|▏ | 10/520 [00:47<33:50, 3.98s/it] {'loss': 1.423, 'grad_norm': 0.0006160306046557459, 'learning_rate': 0.1875, 'epoch': 0.02} + 2%|▏ | 10/520 [00:47<33:50, 3.98s/it] 2%|▏ | 11/520 [00:50<33:26, 3.94s/it] {'loss': 1.4662, 'grad_norm': 0.0005974268907724836, 'learning_rate': 0.20625, 'epoch': 0.02} + 2%|▏ | 11/520 [00:50<33:26, 3.94s/it] 2%|▏ | 12/520 [00:54<32:34, 3.85s/it] {'loss': 1.3358, 'grad_norm': 0.0005452547925222023, 'learning_rate': 0.22499999999999998, 'epoch': 0.02} + 2%|▏ | 12/520 [00:54<32:34, 3.85s/it][2025-10-09 08:20:54,968] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [00:58<33:48, 4.00s/it] {'loss': 1.4012, 'grad_norm': 0.0006539610056790682, 'learning_rate': 0.24375, 'epoch': 0.03} + 2%|▎ | 13/520 [00:58<33:48, 4.00s/it] 3%|▎ | 14/520 [01:02<32:55, 3.90s/it] {'loss': 1.4481, 'grad_norm': 0.000927889623179401, 'learning_rate': 0.2625, 'epoch': 0.03} + 3%|▎ | 14/520 [01:02<32:55, 3.90s/it] 3%|▎ | 15/520 [01:06<32:16, 3.83s/it] {'loss': 1.3729, 'grad_norm': 0.0008357688448487284, 'learning_rate': 0.28125, 'epoch': 0.03} + 3%|▎ | 15/520 [01:06<32:16, 3.83s/it] 3%|▎ | 16/520 [01:09<31:53, 3.80s/it] {'loss': 1.3317, 'grad_norm': 0.0010097942308240197, 'learning_rate': 0.3, 'epoch': 0.03} + 3%|▎ | 16/520 [01:10<31:53, 3.80s/it] 3%|▎ | 17/520 [01:13<31:28, 3.75s/it] {'loss': 1.4591, 'grad_norm': 0.0012217213618717978, 'learning_rate': 0.2999970859397307, 'epoch': 0.03} + 3%|▎ | 17/520 [01:13<31:28, 3.75s/it] 3%|▎ | 18/520 [01:17<31:10, 3.73s/it] {'loss': 1.3161, 'grad_norm': 0.001368747433033007, 'learning_rate': 0.2999883438721462, 'epoch': 0.03} + 3%|▎ | 18/520 [01:17<31:10, 3.73s/it] 4%|▎ | 19/520 [01:21<31:09, 3.73s/it] {'loss': 1.3393, 'grad_norm': 0.0015707381159666106, 'learning_rate': 0.29997377413691195, 'epoch': 0.04} + 4%|▎ | 19/520 [01:21<31:09, 3.73s/it] 4%|▍ | 20/520 [01:24<31:00, 3.72s/it] {'loss': 1.3027, 'grad_norm': 0.0018750345856287523, 'learning_rate': 0.29995337730012245, 'epoch': 0.04} + 4%|▍ | 20/520 [01:24<31:00, 3.72s/it] 4%|▍ | 21/520 [01:28<30:51, 3.71s/it] {'loss': 1.3895, 'grad_norm': 0.003140444346587738, 'learning_rate': 0.2999271541542791, 'epoch': 0.04} + 4%|▍ | 21/520 [01:28<30:51, 3.71s/it] 4%|▍ | 22/520 [01:32<30:42, 3.70s/it] {'loss': 1.4794, 'grad_norm': 0.0023379878662323087, 'learning_rate': 0.2998951057182598, 'epoch': 0.04} + 4%|▍ | 22/520 [01:32<30:42, 3.70s/it] 4%|▍ | 23/520 [01:35<30:34, 3.69s/it] {'loss': 1.406, 'grad_norm': 0.0018983667453092176, 'learning_rate': 0.2998572332372787, 'epoch': 0.04} + 4%|▍ | 23/520 [01:35<30:34, 3.69s/it] 5%|▍ | 24/520 [01:39<30:38, 3.71s/it] {'loss': 1.3377, 'grad_norm': 0.0020318968199964037, 'learning_rate': 0.29981353818283835, 'epoch': 0.05} + 5%|▍ | 24/520 [01:39<30:38, 3.71s/it] 5%|▍ | 25/520 [01:43<30:29, 3.70s/it] {'loss': 1.4197, 'grad_norm': 0.002497535456842908, 'learning_rate': 0.29976402225267246, 'epoch': 0.05} + 5%|▍ | 25/520 [01:43<30:29, 3.70s/it] 5%|▌ | 26/520 [01:46<30:19, 3.68s/it] {'loss': 1.375, 'grad_norm': 0.001984513776991269, 'learning_rate': 0.2997086873706798, 'epoch': 0.05} + 5%|▌ | 26/520 [01:46<30:19, 3.68s/it] 5%|▌ | 27/520 [01:50<30:22, 3.70s/it] {'loss': 1.3216, 'grad_norm': 0.0023967028786124767, 'learning_rate': 0.2996475356868493, 'epoch': 0.05} + 5%|▌ | 27/520 [01:50<30:22, 3.70s/it] 5%|▌ | 28/520 [01:54<30:10, 3.68s/it] {'loss': 1.3353, 'grad_norm': 0.0025806845894307826, 'learning_rate': 0.299580569577177, 'epoch': 0.05} + 5%|▌ | 28/520 [01:54<30:10, 3.68s/it] 6%|▌ | 29/520 [01:57<30:03, 3.67s/it] {'loss': 1.356, 'grad_norm': 0.0021876832468292707, 'learning_rate': 0.2995077916435733, 'epoch': 0.06} + 6%|▌ | 29/520 [01:57<30:03, 3.67s/it] 6%|▌ | 30/520 [02:01<30:06, 3.69s/it] {'loss': 1.4379, 'grad_norm': 0.001955558187733015, 'learning_rate': 0.29942920471376183, 'epoch': 0.06} + 6%|▌ | 30/520 [02:01<30:06, 3.69s/it] 6%|▌ | 31/520 [02:05<30:02, 3.69s/it] {'loss': 1.3484, 'grad_norm': 0.00206688267551231, 'learning_rate': 
0.29934481184117007, 'epoch': 0.06} + 6%|▌ | 31/520 [02:05<30:02, 3.69s/it] 6%|▌ | 32/520 [02:09<30:06, 3.70s/it] {'loss': 1.2963, 'grad_norm': 0.0025001346559681047, 'learning_rate': 0.2992546163048102, 'epoch': 0.06} + 6%|▌ | 32/520 [02:09<30:06, 3.70s/it] 6%|▋ | 33/520 [02:12<29:51, 3.68s/it] {'loss': 1.3652, 'grad_norm': 0.002451694877042516, 'learning_rate': 0.29915862160915196, 'epoch': 0.06} + 6%|▋ | 33/520 [02:12<29:51, 3.68s/it] 7%|▋ | 34/520 [02:16<29:44, 3.67s/it] {'loss': 1.356, 'grad_norm': 0.0024835405616788347, 'learning_rate': 0.2990568314839864, 'epoch': 0.07} + 7%|▋ | 34/520 [02:16<29:44, 3.67s/it] 7%|▋ | 35/520 [02:19<29:37, 3.66s/it] {'loss': 1.3602, 'grad_norm': 0.0024997866278553254, 'learning_rate': 0.2989492498842809, 'epoch': 0.07} + 7%|▋ | 35/520 [02:19<29:37, 3.66s/it] 7%|▋ | 36/520 [02:23<29:31, 3.66s/it] {'loss': 1.4747, 'grad_norm': 0.0022143847301946806, 'learning_rate': 0.29883588099002584, 'epoch': 0.07} + 7%|▋ | 36/520 [02:23<29:31, 3.66s/it] 7%|▋ | 37/520 [02:27<29:29, 3.66s/it] {'loss': 1.4563, 'grad_norm': 0.002042547253189403, 'learning_rate': 0.29871672920607156, 'epoch': 0.07} + 7%|▋ | 37/520 [02:27<29:29, 3.66s/it] 7%|▋ | 38/520 [02:30<29:25, 3.66s/it] {'loss': 1.5663, 'grad_norm': 0.0023820966688502636, 'learning_rate': 0.2985917991619579, 'epoch': 0.07} + 7%|▋ | 38/520 [02:30<29:25, 3.66s/it] 8%|▊ | 39/520 [02:34<29:19, 3.66s/it] {'loss': 1.3981, 'grad_norm': 0.002623874328359189, 'learning_rate': 0.2984610957117339, 'epoch': 0.07} + 8%|▊ | 39/520 [02:34<29:19, 3.66s/it] 8%|▊ | 40/520 [02:38<29:41, 3.71s/it] {'loss': 1.4335, 'grad_norm': 0.0020121657118781925, 'learning_rate': 0.29832462393376924, 'epoch': 0.08} + 8%|▊ | 40/520 [02:38<29:41, 3.71s/it] 8%|▊ | 41/520 [02:42<30:05, 3.77s/it] {'loss': 1.4085, 'grad_norm': 0.0021715777621841145, 'learning_rate': 0.29818238913055717, 'epoch': 0.08} + 8%|▊ | 41/520 [02:42<30:05, 3.77s/it] 8%|▊ | 42/520 [02:46<30:15, 3.80s/it] {'loss': 1.4456, 'grad_norm': 0.0029058021617108525, 'learning_rate': 0.29803439682850813, 'epoch': 0.08} + 8%|▊ | 42/520 [02:46<30:15, 3.80s/it] 8%|▊ | 43/520 [02:50<30:19, 3.81s/it] {'loss': 1.3944, 'grad_norm': 0.0019036483567318036, 'learning_rate': 0.29788065277773534, 'epoch': 0.08} + 8%|▊ | 43/520 [02:50<30:19, 3.81s/it] 8%|▊ | 44/520 [02:53<30:26, 3.84s/it] {'loss': 1.4845, 'grad_norm': 0.002102565866879004, 'learning_rate': 0.2977211629518312, 'epoch': 0.08} + 8%|▊ | 44/520 [02:53<30:26, 3.84s/it] 9%|▊ | 45/520 [02:57<30:28, 3.85s/it] {'loss': 1.4632, 'grad_norm': 0.0023634648181673178, 'learning_rate': 0.2975559335476352, 'epoch': 0.09} + 9%|▊ | 45/520 [02:57<30:28, 3.85s/it] 9%|▉ | 46/520 [03:01<30:35, 3.87s/it] {'loss': 1.5415, 'grad_norm': 0.002075110377012177, 'learning_rate': 0.2973849709849932, 'epoch': 0.09} + 9%|▉ | 46/520 [03:01<30:35, 3.87s/it] 9%|▉ | 47/520 [03:05<30:27, 3.86s/it] {'loss': 1.4337, 'grad_norm': 0.0021612326481613418, 'learning_rate': 0.29720828190650816, 'epoch': 0.09} + 9%|▉ | 47/520 [03:05<30:27, 3.86s/it] 9%|▉ | 48/520 [03:09<30:32, 3.88s/it] {'loss': 1.4007, 'grad_norm': 0.0022505327334964025, 'learning_rate': 0.2970258731772816, 'epoch': 0.09} + 9%|▉ | 48/520 [03:09<30:32, 3.88s/it] 9%|▉ | 49/520 [03:13<30:22, 3.87s/it] {'loss': 1.4593, 'grad_norm': 0.002259024925900454, 'learning_rate': 0.2968377518846473, 'epoch': 0.09} + 9%|▉ | 49/520 [03:13<30:22, 3.87s/it] 10%|▉ | 50/520 [03:17<30:19, 3.87s/it] {'loss': 1.4405, 'grad_norm': 0.0020538706770160953, 'learning_rate': 0.2966439253378957, 'epoch': 0.1} + 10%|▉ | 50/520 [03:17<30:19, 3.87s/it] 
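The `[WARNING] [stage3.py:2069:step]` message logged after step 12 above recommends calling `get_accelerator().empty_cache()` in the training loop so that all ranks flush their allocator caches at the same time. A minimal sketch of that mitigation, assuming a generic DeepSpeed engine loop (`model_engine`, `train_loader`, and the flush interval are placeholders, not names from this run):

```python
from deepspeed.accelerator import get_accelerator

FLUSH_EVERY = 50  # hypothetical interval; tune to how often the warning fires

for step, batch in enumerate(train_loader):  # train_loader: your existing dataloader
    loss = model_engine(**batch)             # model_engine: the DeepSpeed engine;
    model_engine.backward(loss)              # exact forward signature depends on the model
    model_engine.step()
    # Every rank reaches this at the same step, so caches are flushed together
    # instead of one rank stalling on a solo flush under memory pressure.
    if step % FLUSH_EVERY == 0:
        get_accelerator().empty_cache()
```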
10%|▉ | 51/520 [03:20<30:02, 3.84s/it] {'loss': 1.369, 'grad_norm': 0.0021564330297052584, 'learning_rate': 0.29644440106798997, 'epoch': 0.1} + 10%|▉ | 51/520 [03:20<30:02, 3.84s/it] 10%|█ | 52/520 [03:24<29:28, 3.78s/it] {'loss': 1.5115, 'grad_norm': 0.002243788741201218, 'learning_rate': 0.2962391868272735, 'epoch': 0.1} + 10%|█ | 52/520 [03:24<29:28, 3.78s/it] 10%|█ | 53/520 [03:28<29:13, 3.75s/it] {'loss': 1.492, 'grad_norm': 0.0019687775350404843, 'learning_rate': 0.29602829058916846, 'epoch': 0.1} + 10%|█ | 53/520 [03:28<29:13, 3.75s/it] 10%|█ | 54/520 [03:32<29:05, 3.74s/it] {'loss': 1.4042, 'grad_norm': 0.0018584231087904594, 'learning_rate': 0.29581172054786614, 'epoch': 0.1} + 10%|█ | 54/520 [03:32<29:05, 3.74s/it] 11%|█ | 55/520 [03:35<28:44, 3.71s/it] {'loss': 1.3706, 'grad_norm': 0.002018433394405488, 'learning_rate': 0.29558948511800864, 'epoch': 0.11} + 11%|█ | 55/520 [03:35<28:44, 3.71s/it] 11%|█ | 56/520 [03:39<28:34, 3.70s/it] {'loss': 1.5073, 'grad_norm': 0.0020981664332575365, 'learning_rate': 0.2953615929343617, 'epoch': 0.11} + 11%|█ | 56/520 [03:39<28:34, 3.70s/it] 11%|█ | 57/520 [03:43<28:34, 3.70s/it] {'loss': 1.3522, 'grad_norm': 0.002118412695805318, 'learning_rate': 0.2951280528514794, 'epoch': 0.11} + 11%|█ | 57/520 [03:43<28:34, 3.70s/it] 11%|█ | 58/520 [03:46<28:26, 3.69s/it] {'loss': 1.5248, 'grad_norm': 0.0016552300341833599, 'learning_rate': 0.2948888739433602, 'epoch': 0.11} + 11%|█ | 58/520 [03:46<28:26, 3.69s/it] 11%|█▏ | 59/520 [03:50<28:20, 3.69s/it] {'loss': 1.3553, 'grad_norm': 0.0018363821174197793, 'learning_rate': 0.29464406550309413, 'epoch': 0.11} + 11%|█▏ | 59/520 [03:50<28:20, 3.69s/it] 12%|█▏ | 60/520 [03:54<28:12, 3.68s/it] {'loss': 1.4374, 'grad_norm': 0.0017280614057646232, 'learning_rate': 0.29439363704250177, 'epoch': 0.12} + 12%|█▏ | 60/520 [03:54<28:12, 3.68s/it] 12%|█▏ | 61/520 [03:57<28:08, 3.68s/it] {'loss': 1.4618, 'grad_norm': 0.0020286052875910956, 'learning_rate': 0.29413759829176495, 'epoch': 0.12} + 12%|█▏ | 61/520 [03:57<28:08, 3.68s/it] 12%|█▏ | 62/520 [04:01<28:03, 3.68s/it] {'loss': 1.4114, 'grad_norm': 0.0018398228439728372, 'learning_rate': 0.29387595919904813, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:01<28:03, 3.68s/it] 12%|█▏ | 63/520 [04:05<27:54, 3.66s/it] {'loss': 1.3969, 'grad_norm': 0.0017167454211468886, 'learning_rate': 0.2936087299301127, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:05<27:54, 3.66s/it] 12%|█▏ | 64/520 [04:08<27:51, 3.67s/it] {'loss': 1.4277, 'grad_norm': 0.0017809423716013011, 'learning_rate': 0.2933359208679211, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:08<27:51, 3.67s/it] 12%|█▎ | 65/520 [04:12<27:47, 3.67s/it] {'loss': 1.4294, 'grad_norm': 0.0021632693909247017, 'learning_rate': 0.293057542612234, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:12<27:47, 3.67s/it] 13%|█▎ | 66/520 [04:16<27:43, 3.66s/it] {'loss': 1.3815, 'grad_norm': 0.0015069545199944757, 'learning_rate': 0.2927736059791984, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:16<27:43, 3.66s/it] 13%|█▎ | 67/520 [04:19<27:47, 3.68s/it] {'loss': 1.2639, 'grad_norm': 0.0015502883841431716, 'learning_rate': 0.2924841220009269, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:19<27:47, 3.68s/it] 13%|█▎ | 68/520 [04:23<27:40, 3.67s/it] {'loss': 1.3332, 'grad_norm': 0.001686195924831938, 'learning_rate': 0.29218910192506975, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:23<27:40, 3.67s/it] 13%|█▎ | 69/520 [04:27<27:41, 3.68s/it] {'loss': 1.309, 'grad_norm': 0.0014698878420550063, 'learning_rate': 0.2918885572143774, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:27<27:41, 3.68s/it] 13%|█▎ | 70/520 
[04:30<27:34, 3.68s/it] {'loss': 1.3618, 'grad_norm': 0.0016456222733152739, 'learning_rate': 0.29158249954625515, 'epoch': 0.13} + 13%|█▎ | 70/520 [04:30<27:34, 3.68s/it] 14%|█▎ | 71/520 [04:34<27:32, 3.68s/it] {'loss': 1.2901, 'grad_norm': 0.0015265782798715532, 'learning_rate': 0.29127094081230953, 'epoch': 0.14} + 14%|█▎ | 71/520 [04:34<27:32, 3.68s/it] 14%|█▍ | 72/520 [04:38<27:43, 3.71s/it] {'loss': 1.4318, 'grad_norm': 0.0015503227001945148, 'learning_rate': 0.2909538931178863, 'epoch': 0.14} + 14%|█▍ | 72/520 [04:38<27:43, 3.71s/it] 14%|█▍ | 73/520 [04:42<27:45, 3.73s/it] {'loss': 1.2664, 'grad_norm': 0.0014112924594800243, 'learning_rate': 0.2906313687815999, 'epoch': 0.14} + 14%|█▍ | 73/520 [04:42<27:45, 3.73s/it] 14%|█▍ | 74/520 [04:45<27:49, 3.74s/it] {'loss': 1.379, 'grad_norm': 0.00156928753187071, 'learning_rate': 0.2903033803348551, 'epoch': 0.14} + 14%|█▍ | 74/520 [04:45<27:49, 3.74s/it] 14%|█▍ | 75/520 [04:49<28:01, 3.78s/it] {'loss': 1.2862, 'grad_norm': 0.001315026375771205, 'learning_rate': 0.28996994052135994, 'epoch': 0.14} + 14%|█▍ | 75/520 [04:49<28:01, 3.78s/it] 15%|█▍ | 76/520 [04:53<27:44, 3.75s/it] {'loss': 1.5319, 'grad_norm': 0.006810007046319112, 'learning_rate': 0.2896310622966306, 'epoch': 0.15} + 15%|█▍ | 76/520 [04:53<27:44, 3.75s/it] 15%|█▍ | 77/520 [04:57<27:35, 3.74s/it] {'loss': 1.2116, 'grad_norm': 0.0015055499657530098, 'learning_rate': 0.289286758827488, 'epoch': 0.15} + 15%|█▍ | 77/520 [04:57<27:35, 3.74s/it] 15%|█▌ | 78/520 [05:00<27:22, 3.72s/it] {'loss': 1.3282, 'grad_norm': 0.00146100009816174, 'learning_rate': 0.2889370434915463, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:00<27:22, 3.72s/it] 15%|█▌ | 79/520 [05:04<27:20, 3.72s/it] {'loss': 1.3093, 'grad_norm': 0.0013283597245012334, 'learning_rate': 0.288581929876693, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:04<27:20, 3.72s/it] 15%|█▌ | 80/520 [05:08<27:17, 3.72s/it] {'loss': 1.5749, 'grad_norm': 0.0038977304520031366, 'learning_rate': 0.28822143178056114, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:08<27:17, 3.72s/it] 16%|█▌ | 81/520 [05:11<27:12, 3.72s/it] {'loss': 1.4573, 'grad_norm': 0.0016088989770257627, 'learning_rate': 0.28785556320999306, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:11<27:12, 3.72s/it] 16%|█▌ | 82/520 [05:15<27:14, 3.73s/it] {'loss': 1.3847, 'grad_norm': 0.0012844040896818325, 'learning_rate': 0.2874843383804964, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:15<27:14, 3.73s/it] 16%|█▌ | 83/520 [05:19<27:07, 3.72s/it] {'loss': 1.3986, 'grad_norm': 0.0013500400605818432, 'learning_rate': 0.28710777171569146, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:19<27:07, 3.72s/it] 16%|█▌ | 84/520 [05:23<27:00, 3.72s/it] {'loss': 1.3965, 'grad_norm': 0.0013460530369864144, 'learning_rate': 0.28672587784675097, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:23<27:00, 3.72s/it] 16%|█▋ | 85/520 [05:26<26:51, 3.70s/it] {'loss': 1.424, 'grad_norm': 0.0012342200249667798, 'learning_rate': 0.28633867161183163, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:26<26:51, 3.70s/it] 17%|█▋ | 86/520 [05:30<27:04, 3.74s/it] {'loss': 1.4352, 'grad_norm': 0.0012858713224013626, 'learning_rate': 0.2859461680554975, 'epoch': 0.17} + 17%|█▋ | 86/520 [05:30<27:04, 3.74s/it] 17%|█▋ | 87/520 [05:34<26:56, 3.73s/it] {'loss': 1.4378, 'grad_norm': 0.0014400595556616847, 'learning_rate': 0.2855483824281355, 'epoch': 0.17} + 17%|█▋ | 87/520 [05:34<26:56, 3.73s/it] 17%|█▋ | 88/520 [05:37<26:49, 3.72s/it] {'loss': 1.4279, 'grad_norm': 0.0018604971631595538, 'learning_rate': 0.28514533018536287, 'epoch': 0.17} + 17%|█▋ | 88/520 [05:37<26:49, 3.72s/it] 17%|█▋ | 89/520 
[05:41<26:43, 3.72s/it] {'loss': 1.3797, 'grad_norm': 0.0012456793208244056, 'learning_rate': 0.2847370269874266, 'epoch': 0.17} + 17%|█▋ | 89/520 [05:41<26:43, 3.72s/it] 17%|█▋ | 90/520 [05:45<26:36, 3.71s/it] {'loss': 1.3111, 'grad_norm': 0.0014005361452502545, 'learning_rate': 0.28432348869859514, 'epoch': 0.17} + 17%|█▋ | 90/520 [05:45<26:36, 3.71s/it] 18%|█▊ | 91/520 [05:49<26:37, 3.72s/it] {'loss': 1.3885, 'grad_norm': 0.0011426530825634908, 'learning_rate': 0.2839047313865417, 'epoch': 0.17} + 18%|█▊ | 91/520 [05:49<26:37, 3.72s/it] 18%|█▊ | 92/520 [05:52<26:30, 3.72s/it] {'loss': 1.329, 'grad_norm': 0.0012006010359618055, 'learning_rate': 0.2834807713217203, 'epoch': 0.18} + 18%|█▊ | 92/520 [05:52<26:30, 3.72s/it] 18%|█▊ | 93/520 [05:56<26:24, 3.71s/it] {'loss': 1.3301, 'grad_norm': 0.0011961170561141704, 'learning_rate': 0.28305162497673325, 'epoch': 0.18} + 18%|█▊ | 93/520 [05:56<26:24, 3.71s/it] 18%|█▊ | 94/520 [06:00<26:27, 3.73s/it] {'loss': 1.4319, 'grad_norm': 0.0012218068813896464, 'learning_rate': 0.28261730902569143, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:00<26:27, 3.73s/it] 18%|█▊ | 95/520 [06:04<26:24, 3.73s/it] {'loss': 1.3079, 'grad_norm': 0.001313231691752447, 'learning_rate': 0.28217784034356636, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:04<26:24, 3.73s/it] 18%|█▊ | 96/520 [06:07<26:19, 3.72s/it] {'loss': 1.3277, 'grad_norm': 0.0010901458429028953, 'learning_rate': 0.2817332360055343, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:07<26:19, 3.72s/it] 19%|█▊ | 97/520 [06:11<26:11, 3.71s/it] {'loss': 1.2847, 'grad_norm': 0.001286433180939028, 'learning_rate': 0.28128351328631307, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:11<26:11, 3.71s/it] 19%|█▉ | 98/520 [06:15<26:00, 3.70s/it] {'loss': 1.2849, 'grad_norm': 0.000932688772720053, 'learning_rate': 0.2808286896594908, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:15<26:00, 3.70s/it] 19%|█▉ | 99/520 [06:18<25:59, 3.70s/it] {'loss': 1.3053, 'grad_norm': 0.0011694790419673406, 'learning_rate': 0.28036878279684696, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:18<25:59, 3.70s/it] 19%|█▉ | 100/520 [06:22<25:57, 3.71s/it] {'loss': 1.3447, 'grad_norm': 0.001445494458005932, 'learning_rate': 0.27990381056766583, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:22<25:57, 3.71s/it] 19%|█▉ | 101/520 [06:26<25:48, 3.70s/it] {'loss': 1.3074, 'grad_norm': 0.0011487691508286785, 'learning_rate': 0.27943379103804195, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:26<25:48, 3.70s/it] 20%|█▉ | 102/520 [06:29<25:52, 3.71s/it] {'loss': 1.3087, 'grad_norm': 0.0013950337982123074, 'learning_rate': 0.27895874247017854, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:29<25:52, 3.71s/it] 20%|█▉ | 103/520 [06:33<25:50, 3.72s/it] {'loss': 1.2438, 'grad_norm': 0.0010245965709548357, 'learning_rate': 0.27847868332167774, 'epoch': 0.2} + 20%|█▉ | 103/520 [06:33<25:50, 3.72s/it] 20%|██ | 104/520 [06:37<25:41, 3.71s/it] {'loss': 1.3104, 'grad_norm': 0.0010561472611417932, 'learning_rate': 0.2779936322448233, 'epoch': 0.2} + 20%|██ | 104/520 [06:37<25:41, 3.71s/it] 20%|██ | 105/520 [06:41<25:32, 3.69s/it] {'loss': 1.309, 'grad_norm': 0.0009770126486783849, 'learning_rate': 0.27750360808585633, 'epoch': 0.2} + 20%|██ | 105/520 [06:41<25:32, 3.69s/it] 20%|██ | 106/520 [06:44<25:23, 3.68s/it] {'loss': 1.3621, 'grad_norm': 0.001051705370302067, 'learning_rate': 0.27700862988424263, 'epoch': 0.2} + 20%|██ | 106/520 [06:44<25:23, 3.68s/it] 21%|██ | 107/520 [06:48<25:23, 3.69s/it] {'loss': 1.3262, 'grad_norm': 0.0010703799045645391, 'learning_rate': 0.27650871687193285, 'epoch': 0.21} + 21%|██ | 107/520 [06:48<25:23, 3.69s/it] 
21%|██ | 108/520 [06:52<25:19, 3.69s/it] {'loss': 1.2505, 'grad_norm': 0.0010179965166580676, 'learning_rate': 0.27600388847261575, 'epoch': 0.21} + 21%|██ | 108/520 [06:52<25:19, 3.69s/it] 21%|██ | 109/520 [06:55<25:16, 3.69s/it] {'loss': 1.3214, 'grad_norm': 0.0010482447551201667, 'learning_rate': 0.27549416430096296, 'epoch': 0.21} + 21%|██ | 109/520 [06:55<25:16, 3.69s/it] 21%|██ | 110/520 [06:59<25:34, 3.74s/it] {'loss': 1.4461, 'grad_norm': 0.001054079707865095, 'learning_rate': 0.2749795641618673, 'epoch': 0.21} + 21%|██ | 110/520 [06:59<25:34, 3.74s/it] 21%|██▏ | 111/520 [07:03<25:44, 3.78s/it] {'loss': 1.4512, 'grad_norm': 0.0010706064566929606, 'learning_rate': 0.27446010804967313, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:03<25:44, 3.78s/it] 22%|██▏ | 112/520 [07:07<25:48, 3.79s/it] {'loss': 1.3344, 'grad_norm': 0.0010367251714552526, 'learning_rate': 0.2739358161473992, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:07<25:48, 3.79s/it] 22%|██▏ | 113/520 [07:11<25:47, 3.80s/it] {'loss': 1.2176, 'grad_norm': 0.0009356071358788481, 'learning_rate': 0.27340670882595497, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:11<25:47, 3.80s/it] 22%|██▏ | 114/520 [07:14<25:28, 3.77s/it] {'loss': 1.3202, 'grad_norm': 0.0009236390563855235, 'learning_rate': 0.2728728066433488, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:14<25:28, 3.77s/it] 22%|██▏ | 115/520 [07:18<25:21, 3.76s/it] {'loss': 1.4279, 'grad_norm': 0.0009632698048619353, 'learning_rate': 0.27233413034388904, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:18<25:21, 3.76s/it] 22%|██▏ | 116/520 [07:22<25:15, 3.75s/it] {'loss': 1.4261, 'grad_norm': 0.000909436685051352, 'learning_rate': 0.2717907008573785, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:22<25:15, 3.75s/it] 22%|██▎ | 117/520 [07:25<25:01, 3.73s/it] {'loss': 1.3935, 'grad_norm': 0.0009946394409944554, 'learning_rate': 0.2712425392983008, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:25<25:01, 3.73s/it] 23%|██▎ | 118/520 [07:29<25:04, 3.74s/it] {'loss': 1.3008, 'grad_norm': 0.0009021135906701353, 'learning_rate': 0.27068966696500024, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:29<25:04, 3.74s/it] 23%|██▎ | 119/520 [07:33<24:56, 3.73s/it] {'loss': 1.249, 'grad_norm': 0.0009517804861027477, 'learning_rate': 0.2701321053388542, 'epoch': 0.23} + 23%|██▎ | 119/520 [07:33<24:56, 3.73s/it] 23%|██▎ | 120/520 [07:37<25:04, 3.76s/it] {'loss': 1.2775, 'grad_norm': 0.0010544491110238776, 'learning_rate': 0.26956987608343835, 'epoch': 0.23} + 23%|██▎ | 120/520 [07:37<25:04, 3.76s/it] 23%|██▎ | 121/520 [07:41<25:12, 3.79s/it] {'loss': 1.3283, 'grad_norm': 0.0009703451285807672, 'learning_rate': 0.26900300104368524, 'epoch': 0.23} + 23%|██▎ | 121/520 [07:41<25:12, 3.79s/it] 23%|██▎ | 122/520 [07:44<25:17, 3.81s/it] {'loss': 1.2177, 'grad_norm': 0.0008330682405521696, 'learning_rate': 0.2684315022450353, 'epoch': 0.23} + 23%|██▎ | 122/520 [07:44<25:17, 3.81s/it] 24%|██▎ | 123/520 [07:48<25:19, 3.83s/it] {'loss': 1.3849, 'grad_norm': 0.0009826960747180637, 'learning_rate': 0.26785540189258106, 'epoch': 0.24} + 24%|██▎ | 123/520 [07:48<25:19, 3.83s/it] 24%|██▍ | 124/520 [07:52<25:20, 3.84s/it] {'loss': 1.3, 'grad_norm': 0.0010852917034733253, 'learning_rate': 0.2672747223702045, 'epoch': 0.24} + 24%|██▍ | 124/520 [07:52<25:20, 3.84s/it] 24%|██▍ | 125/520 [07:56<25:13, 3.83s/it] {'loss': 1.2821, 'grad_norm': 0.0009408331621197077, 'learning_rate': 0.26668948623970723, 'epoch': 0.24} + 24%|██▍ | 125/520 [07:56<25:13, 3.83s/it] 24%|██▍ | 126/520 [08:00<26:14, 4.00s/it] {'loss': 1.3133, 'grad_norm': 0.000760848713874392, 'learning_rate': 
0.2660997162399341, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:00<26:14, 4.00s/it] 24%|██▍ | 127/520 [08:04<25:38, 3.92s/it] {'loss': 1.2626, 'grad_norm': 0.000984385165787255, 'learning_rate': 0.26550543528588944, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:04<25:38, 3.92s/it] 25%|██▍ | 128/520 [08:08<25:09, 3.85s/it] {'loss': 1.3149, 'grad_norm': 0.001049462588305171, 'learning_rate': 0.26490666646784666, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:08<25:09, 3.85s/it] 25%|██▍ | 129/520 [08:12<24:55, 3.82s/it] {'loss': 1.264, 'grad_norm': 0.0007847900972890463, 'learning_rate': 0.2643034330504516, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:12<24:55, 3.82s/it] 25%|██▌ | 130/520 [08:15<24:36, 3.79s/it] {'loss': 1.3101, 'grad_norm': 0.0009367024318221983, 'learning_rate': 0.2636957584718179, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:15<24:36, 3.79s/it] 25%|██▌ | 131/520 [08:19<24:25, 3.77s/it] {'loss': 1.2887, 'grad_norm': 0.0008099967266115239, 'learning_rate': 0.263083666342617, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:19<24:25, 3.77s/it] 25%|██▌ | 132/520 [08:23<24:17, 3.76s/it] {'loss': 1.3473, 'grad_norm': 0.0009850380929773757, 'learning_rate': 0.2624671804451601, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:23<24:17, 3.76s/it] 26%|██▌ | 133/520 [08:26<24:06, 3.74s/it] {'loss': 1.2674, 'grad_norm': 0.0009350488083902492, 'learning_rate': 0.2618463247324748, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:26<24:06, 3.74s/it] 26%|██▌ | 134/520 [08:30<24:04, 3.74s/it] {'loss': 1.3381, 'grad_norm': 0.0008499107174987877, 'learning_rate': 0.26122112332737396, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:30<24:04, 3.74s/it] 26%|██▌ | 135/520 [08:34<23:59, 3.74s/it] {'loss': 1.4029, 'grad_norm': 0.0009045310054521549, 'learning_rate': 0.2605916005215186, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:34<23:59, 3.74s/it] 26%|██▌ | 136/520 [08:38<23:54, 3.74s/it] {'loss': 1.3315, 'grad_norm': 0.0008431276539283928, 'learning_rate': 0.2599577807744739, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:38<23:54, 3.74s/it] 26%|██▋ | 137/520 [08:41<23:51, 3.74s/it] {'loss': 1.2472, 'grad_norm': 0.0009565362301093651, 'learning_rate': 0.25931968871275923, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:41<23:51, 3.74s/it] 27%|██▋ | 138/520 [08:45<23:46, 3.73s/it] {'loss': 1.2667, 'grad_norm': 0.0007561120079036945, 'learning_rate': 0.2586773491288909, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:45<23:46, 3.73s/it] 27%|██▋ | 139/520 [08:49<23:46, 3.74s/it] {'loss': 1.1875, 'grad_norm': 0.0007919004529575188, 'learning_rate': 0.258030786980419, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:49<23:46, 3.74s/it] 27%|██▋ | 140/520 [08:53<23:42, 3.74s/it] {'loss': 1.3264, 'grad_norm': 0.0007515003943805975, 'learning_rate': 0.25738002738895777, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:53<23:42, 3.74s/it] 27%|██▋ | 141/520 [08:56<23:51, 3.78s/it] {'loss': 1.3691, 'grad_norm': 0.000746485799835488, 'learning_rate': 0.25672509563920953, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:56<23:51, 3.78s/it] 27%|██▋ | 142/520 [09:00<23:49, 3.78s/it] {'loss': 1.352, 'grad_norm': 0.0007538687822647614, 'learning_rate': 0.2560660171779821, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:00<23:49, 3.78s/it] 28%|██▊ | 143/520 [09:04<23:33, 3.75s/it] {'loss': 1.2986, 'grad_norm': 0.0010073286069329221, 'learning_rate': 0.2554028176132004, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:04<23:33, 3.75s/it] 28%|██▊ | 144/520 [09:08<23:23, 3.73s/it] {'loss': 1.2563, 'grad_norm': 0.0008400078076091971, 'learning_rate': 0.2547355227129109, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:08<23:23, 3.73s/it] 28%|██▊ | 145/520 [09:11<23:21, 
+ 28%|██▊ | 145/520 [09:11<23:21, 3.74s/it] {'loss': 1.1913, 'grad_norm': 0.0007275404677550541, 'learning_rate': 0.25406415840428126, 'epoch': 0.28}
+ [records for steps 146-480 condensed to every 50th step; across the span, loss fluctuates between 0.9957 and 1.4223, grad_norm stays within roughly 4.5e-4 to 1.0e-3, the cosine schedule decays the learning rate from 2.54e-1 toward 4.41e-3, and throughput varies between about 3.65 and 4.11 s/it]
+ 29%|██▉ | 150/520 [09:30<22:57, 3.72s/it] {'loss': 1.4223, 'grad_norm': 0.0007785556164817255, 'learning_rate': 0.2506472141978955, 'epoch': 0.29}
+ 38%|███▊ | 200/520 [12:38<19:51, 3.72s/it] {'loss': 1.2301, 'grad_norm': 0.0006846457139463406, 'learning_rate': 0.21169306546959177, 'epoch': 0.38}
+ 48%|████▊ | 250/520 [15:44<17:23, 3.86s/it] {'loss': 1.2297, 'grad_norm': 0.0006074429137404222, 'learning_rate': 0.16679467141549617, 'epoch': 0.48}
+ 58%|█████▊ | 300/520 [18:51<13:35, 3.71s/it] {'loss': 1.3089, 'grad_norm': 0.0005534205268832087, 'learning_rate': 0.12027807852009038, 'epoch': 0.58}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
+ 67%|██████▋ | 350/520 [22:00<10:28, 3.70s/it] {'loss': 1.224, 'grad_norm': 0.0005571349493673778, 'learning_rate': 0.07662524983360665, 'epoch': 0.67}
+ 77%|███████▋ | 400/520 [25:11<07:54, 3.96s/it] {'loss': 1.2467, 'grad_norm': 0.0004986539255224604, 'learning_rate': 0.04004221922552608, 'epoch': 0.77}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ 87%|████████▋ | 450/520 [28:19<04:19, 3.71s/it] {'loss': 1.23, 'grad_norm': 0.000557963792071798, 'learning_rate': 0.014053831944502508, 'epoch': 0.87}
+ 92%|█████████▎| 481/520 [30:15<02:25, 3.74s/it] {'loss': 1.2475, 'grad_norm': 0.0005006067245175642, 'learning_rate': 0.004410514881991357, 'epoch': 0.93}
92%|█████████▎| 481/520 [30:15<02:25, 3.74s/it] 93%|█████████▎| 482/520 [30:18<02:21, 3.73s/it] {'loss': 1.2645, 'grad_norm': 0.0005166229784848141, 'learning_rate': 0.004188279452133825, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:18<02:21, 3.73s/it] 93%|█████████▎| 483/520 [30:22<02:17, 3.72s/it] {'loss': 1.2096, 'grad_norm': 0.0005482015323550188, 'learning_rate': 0.003971709410831498, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:22<02:17, 3.72s/it] 93%|█████████▎| 484/520 [30:26<02:13, 3.71s/it] {'loss': 1.2215, 'grad_norm': 0.0005883437340580699, 'learning_rate': 0.003760813172726457, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:26<02:13, 3.71s/it] 93%|█████████▎| 485/520 [30:30<02:10, 3.71s/it] {'loss': 1.1693, 'grad_norm': 0.0005375902643621074, 'learning_rate': 0.0035555989320099953, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:30<02:10, 3.71s/it] 93%|█████████▎| 486/520 [30:33<02:06, 3.71s/it] {'loss': 1.2885, 'grad_norm': 0.000547311860338659, 'learning_rate': 0.003356074662104319, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:33<02:06, 3.71s/it] 94%|█████████▎| 487/520 [30:37<02:02, 3.70s/it] {'loss': 1.1393, 'grad_norm': 0.0005212831611102562, 'learning_rate': 0.0031622481153527446, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:37<02:02, 3.70s/it] 94%|█████████▍| 488/520 [30:41<02:00, 3.75s/it] {'loss': 1.0872, 'grad_norm': 0.0005471300552239304, 'learning_rate': 0.0029741268227184256, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:41<02:00, 3.75s/it] 94%|█████████▍| 489/520 [30:44<01:55, 3.73s/it] {'loss': 1.2644, 'grad_norm': 0.00047569945747283625, 'learning_rate': 0.0027917180934918517, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:44<01:55, 3.73s/it] 94%|█████████▍| 490/520 [30:48<01:52, 3.74s/it] {'loss': 1.2118, 'grad_norm': 0.0005716822835490985, 'learning_rate': 0.002615029015006759, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:48<01:52, 3.74s/it] 94%|█████████▍| 491/520 [30:52<01:48, 3.75s/it] {'loss': 1.1731, 'grad_norm': 0.0005594425733903195, 'learning_rate': 0.0024440664523648014, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:52<01:48, 3.75s/it] 95%|█████████▍| 492/520 [30:56<01:44, 3.73s/it] {'loss': 1.283, 'grad_norm': 0.0005610211449702886, 'learning_rate': 0.0022788370481687968, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:56<01:44, 3.73s/it] 95%|█████████▍| 493/520 [30:59<01:40, 3.71s/it] {'loss': 1.2816, 'grad_norm': 0.0005826863527267859, 'learning_rate': 0.002119347222264617, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:59<01:40, 3.71s/it] 95%|█████████▌| 494/520 [31:03<01:36, 3.70s/it] {'loss': 1.2197, 'grad_norm': 0.0005499530129579223, 'learning_rate': 0.0019656031714918366, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:03<01:36, 3.70s/it] 95%|█████████▌| 495/520 [31:07<01:32, 3.69s/it] {'loss': 1.1921, 'grad_norm': 0.0005498840500214766, 'learning_rate': 0.0018176108694427928, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:07<01:32, 3.69s/it] 95%|█████████▌| 496/520 [31:10<01:28, 3.70s/it] {'loss': 1.119, 'grad_norm': 0.0005446333846936907, 'learning_rate': 0.0016753760662307216, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:10<01:28, 3.70s/it] 96%|█████████▌| 497/520 [31:14<01:25, 3.71s/it] {'loss': 1.1925, 'grad_norm': 0.0005599750252622396, 'learning_rate': 0.001538904288266102, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:14<01:25, 3.71s/it] 96%|█████████▌| 498/520 [31:18<01:21, 3.71s/it] {'loss': 1.188, 'grad_norm': 0.0005748488794204245, 'learning_rate': 0.0014082008380420785, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:18<01:21, 3.71s/it] 
96%|█████████▌| 499/520 [31:22<01:17, 3.71s/it] {'loss': 1.341, 'grad_norm': 0.0005834175304198937, 'learning_rate': 0.0012832707939284427, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:22<01:17, 3.71s/it] 96%|█████████▌| 500/520 [31:25<01:13, 3.70s/it] {'loss': 1.3194, 'grad_norm': 0.0006035717107379575, 'learning_rate': 0.0011641190099741904, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:25<01:13, 3.70s/it] 96%|█████████▋| 501/520 [31:29<01:10, 3.71s/it] {'loss': 1.2431, 'grad_norm': 0.0005694990525738017, 'learning_rate': 0.0010507501157190568, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:29<01:10, 3.71s/it] 97%|█████████▋| 502/520 [31:33<01:06, 3.70s/it] {'loss': 1.2225, 'grad_norm': 0.0005275188394861692, 'learning_rate': 0.0009431685160136093, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:33<01:06, 3.70s/it] 97%|█████████▋| 503/520 [31:36<01:02, 3.69s/it] {'loss': 1.2256, 'grad_norm': 0.0006570647608841532, 'learning_rate': 0.0008413783908480354, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:36<01:02, 3.69s/it] 97%|█████████▋| 504/520 [31:40<00:59, 3.71s/it] {'loss': 1.2171, 'grad_norm': 0.0006089130576425983, 'learning_rate': 0.0007453836951897885, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:40<00:59, 3.71s/it] 97%|█████████▋| 505/520 [31:44<00:55, 3.70s/it] {'loss': 1.2608, 'grad_norm': 0.0005624852375028172, 'learning_rate': 0.000655188158829928, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:44<00:55, 3.70s/it] 97%|█████████▋| 506/520 [31:47<00:51, 3.69s/it] {'loss': 1.1757, 'grad_norm': 0.0005581154798762002, 'learning_rate': 0.0005707952862381682, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:47<00:51, 3.69s/it] 98%|█████████▊| 507/520 [31:51<00:47, 3.68s/it] {'loss': 1.3778, 'grad_norm': 0.0005279397163682362, 'learning_rate': 0.0004922083564267377, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:51<00:47, 3.68s/it] 98%|█████████▊| 508/520 [31:55<00:43, 3.67s/it] {'loss': 1.2927, 'grad_norm': 0.0005503437653581228, 'learning_rate': 0.0004194304228229806, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:55<00:43, 3.67s/it] 98%|█████████▊| 509/520 [31:58<00:40, 3.67s/it] {'loss': 1.266, 'grad_norm': 0.0005257946478032437, 'learning_rate': 0.00035246431315066884, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:58<00:40, 3.67s/it] 98%|█████████▊| 510/520 [32:02<00:36, 3.66s/it] {'loss': 1.216, 'grad_norm': 0.0005430786286394286, 'learning_rate': 0.00029131262932022284, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:02<00:36, 3.66s/it] 98%|█████████▊| 511/520 [32:06<00:32, 3.65s/it] {'loss': 1.1879, 'grad_norm': 0.0005522392320766472, 'learning_rate': 0.0002359777473275093, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:06<00:32, 3.65s/it] 98%|█████████▊| 512/520 [32:09<00:29, 3.66s/it] {'loss': 1.0718, 'grad_norm': 0.0005481965103469515, 'learning_rate': 0.00018646181716164834, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:09<00:29, 3.66s/it] 99%|█████████▊| 513/520 [32:13<00:25, 3.65s/it] {'loss': 1.2746, 'grad_norm': 0.0006037224161828541, 'learning_rate': 0.00014276676272133025, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:13<00:25, 3.65s/it] 99%|█████████▉| 514/520 [32:17<00:21, 3.65s/it] {'loss': 1.2479, 'grad_norm': 0.0005064001097028144, 'learning_rate': 0.00010489428174020876, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:17<00:21, 3.65s/it] 99%|█████████▉| 515/520 [32:20<00:18, 3.66s/it] {'loss': 1.3029, 'grad_norm': 0.0006587173316918914, 'learning_rate': 7.284584572085362e-05, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:20<00:18, 3.66s/it] 99%|█████████▉| 516/520 [32:24<00:14, 3.66s/it] 
+[step 516/520] {'loss': 1.2032, 'grad_norm': 0.0005847334355270024, 'learning_rate': 4.662269987756318e-05, 'epoch': 0.99}
+[step 517/520] {'loss': 1.2737, 'grad_norm': 0.0005509748249673064, 'learning_rate': 2.6225863088036316e-05, 'epoch': 0.99}
+[step 518/520] {'loss': 1.2156, 'grad_norm': 0.0006429010413148342, 'learning_rate': 1.1656127853770792e-05, 'epoch': 1.0}
+[step 519/520] {'loss': 1.2338, 'grad_norm': 0.000575361728343398, 'learning_rate': 2.9140602692712123e-06, 'epoch': 1.0}
+[step 520/520] {'loss': 1.2501, 'grad_norm': 0.0005137877741887649, 'learning_rate': 0.0, 'epoch': 1.0}
+{'train_runtime': 1960.0354, 'train_samples_per_second': 33.943, 'train_steps_per_second': 0.265, 'train_loss': 1.2784929848634279, 'epoch': 1.0}
+[2025-10-09 08:52:42,612] [INFO] [launch.py:348:main] Process 920737 exits successfully.
+[2025-10-09 08:52:42,613] [INFO] [launch.py:348:main] Process 920736 exits successfully.
+[2025-10-09 08:52:42,613] [INFO] [launch.py:348:main] Process 920735 exits successfully.
+[2025-10-09 08:52:43,614] [INFO] [launch.py:348:main] Process 920738 exits successfully.
+[2025-10-09 08:52:43,615] [INFO] [launch.py:348:main] Process 920734 exits successfully.
+[2025-10-09 08:52:43,615] [INFO] [launch.py:348:main] Process 920733 exits successfully.
+[2025-10-09 08:52:43,615] [INFO] [launch.py:348:main] Process 920732 exits successfully.
+[2025-10-09 08:52:46,619] [INFO] [launch.py:348:main] Process 920731 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-1_connector-3.0_0.5_3e-1_ablation_20251009_081157.log
+Timestamp: 2025-10-09 08:52:49
+=====================================
diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation_20251009_113104.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation_20251009_113104.log
new file mode 100644
index 0000000000000000000000000000000000000000..6d1336b9500b8101a9e873b7e7cc62e5940e3bd3
--- /dev/null
+++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation_20251009_113104.log
@@ -0,0 +1,1786 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation_20251009_113104.log
+Timestamp: 2025-10-09 11:31:04
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import] +[2025-10-09 11:31:06,859] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 11:31:09,538] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 11:31:09,540] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 3e-2 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 3e-2 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml  # type: ignore[import]
+[2025-10-09 11:31:12,156] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:13,199] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-09 11:31:13,199] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-09 11:31:13,199] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-09 11:31:13,199] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-09 11:31:13,199] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-09 11:31:13,199] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-09 11:31:13,199] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-09 11:31:13,201] [INFO] [launch.py:253:main] process 1197335 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_3e-2_connector-3.0_0.5_3e-2_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '3e-2', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '3e-2', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[... processes 1197336-1197342 spawned between 11:31:13,203 and 11:31:13,216 with the identical command, differing only in '--local_rank=1' through '--local_rank=7'; seven near-duplicate command dumps elided ...]
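The spawn command above requests `--lr_scheduler_type cosine` with `--warmup_ratio 0.03` over the 520-step runs in these logs, and the decaying `learning_rate` column in the trace that closes the previous log follows exactly this shape. A minimal sketch, assuming the standard `transformers` cosine-with-warmup schedule (`get_cosine_schedule_with_warmup`); the helper name `lr_at` is ours, not the trainer's:

```python
import math

# Cosine decay with linear warmup, as in transformers'
# get_cosine_schedule_with_warmup (an assumption -- the trainer code itself
# is not part of this diff). 520 total steps, warmup_ratio 0.03 -> 16 steps.
def lr_at(step: int, base_lr: float, total_steps: int = 520, warmup_steps: int = 16) -> float:
    if step < warmup_steps:
        return base_lr * step / warmup_steps
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return base_lr * 0.5 * (1.0 + math.cos(math.pi * progress))

# The 3e-1 run whose tail appears above logs learning_rate=0.02299... at step 430:
print(lr_at(430, base_lr=3e-1))  # ~0.022991, matching the logged value
```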
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml  # type: ignore[import]
+[... the same FutureWarning is emitted once per spawned rank; seven duplicates elided ...]
+[2025-10-09 11:31:19,751] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:19,841] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:20,024] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:20,095] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:20,095] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:20,096] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:20,097] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:20,101] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 11:31:20,162] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 11:31:20,255] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 11:31:20,435] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 11:31:20,506] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 11:31:20,507] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 11:31:20,507] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 11:31:20,507] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-09 11:31:20,508] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 11:31:20,518] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+[... printed once per rank; seven duplicates elided ...]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+[... the same FutureWarning is emitted once per rank; seven duplicates elided ...]
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
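The dict above is the mask configuration the trainer echoes back: soft masks with temperature 0.5 on the LLM and connector, initialised at `--init_mean` 3.0. The masking code itself is not in this diff; what follows is only a generic sketch of a temperature-scaled sigmoid gate consistent with those fields (the name `soft_mask` and the tensor shapes are illustrative):

```python
import torch

def soft_mask(logits: torch.Tensor, temperature: float = 0.5) -> torch.Tensor:
    # Differentiable gate in (0, 1); lower temperature -> sharper, more binary mask.
    return torch.sigmoid(logits / temperature)

# init_mean=3.0 starts the gates near fully open: sigmoid(3.0 / 0.5) ~ 0.9975,
# so training begins close to the dense network and can prune gradually.
logits = torch.full((896, 4864), 3.0, requires_grad=True)  # e.g. one Qwen2.5 MLP weight shape
weight = torch.randn(896, 4864)                            # stand-in for the frozen weight
masked_weight = weight * soft_mask(logits)                 # elementwise gating
```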
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+[... this warning pair is emitted once per rank; seven duplicates elided ...]
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+[... repeated once per rank; further duplicates elided here ...]
+ywang29-vrdb-test1-worker-0:1197335:1197335 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1197335:1197335 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1197335:1197335 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1197335:1197335 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1197335:1197335 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1197335:1197335 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:1197341:1197341 [6] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1197341:1197341 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1197341:1197341 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1197341:1197341 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1197341:1197341 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1197341:1197341 [6] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1197336:1197336 [1] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1197336:1197336 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1197336:1197336 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1197336:1197336 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1197336:1197336 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1197336:1197336 [1] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1197337:1197337 [2] NCCL INFO cudaDriverVersion 12040
+ywang29-vrdb-test1-worker-0:1197337:1197337 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1197337:1197337 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1197337:1197337 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1197337:1197337 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1197337:1197337 [2] NCCL INFO NET/Plugin: Using internal network plugin.
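Per the TinyLlavaConfig above, SigLIP features (hidden size 1152) are projected into the 896-dim Qwen2.5 embedding space by the `mlp2x_gelu` connector. In LLaVA-style codebases that name conventionally denotes a two-layer MLP with a GELU in between; a sketch under that assumption (the connector module itself is not shown in this diff):

```python
import torch.nn as nn

# 'mlp2x_gelu' connector as conventionally built in LLaVA-style models.
connector = nn.Sequential(
    nn.Linear(1152, 896),  # vision_hidden_size -> LLM hidden_size
    nn.GELU(),
    nn.Linear(896, 896),
)
```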
+ywang29-vrdb-test1-worker-0:1197339:1197339 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1197339:1197339 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197339:1197339 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197339:1197339 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1197339:1197339 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1197339:1197339 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO NET/IB : No device found. 
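Every rank logs "NET/IB : No device found" before falling back to the socket transport, so bootstrap and any inter-node traffic would go over eth0. A quick host-side check (a sketch; the standard Linux sysfs path is assumed) shows whether any InfiniBand device is visible at all:

```python
import glob

# NCCL probes InfiniBand first; an empty list here is consistent with the
# "NET/IB : No device found" lines above and the fall-back to NET/Socket.
ib_devices = glob.glob("/sys/class/infiniband/*")
print(ib_devices or "no InfiniBand devices visible")
```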
+ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1197342:1197342 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1197342:1197342 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197342:1197342 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197342:1197342 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1197342:1197342 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1197342:1197342 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1197340:1197340 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1197340:1197340 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197340:1197340 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197340:1197340 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1197340:1197340 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1197340:1197340 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1197338:1197338 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1197338:1197338 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197338:1197338 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197338:1197338 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1197338:1197338 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1197338:1197338 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO NET/IB : No device found. 
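The bootstrap lines show NCCL_SOCKET_IFNAME=eth pinning bootstrap to eth0 and the internal network plugin taking over because libnccl-net.so is not installed. A minimal sketch of the environment these eight processes are running under (the rank variables are assumed to come from the deepspeed launcher):

```python
import os
import torch
import torch.distributed as dist

# Mirrors the environment visible in this log; NCCL reads these values when
# the process group is created, producing the bootstrap lines above.
os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")
os.environ.setdefault("NCCL_DEBUG", "INFO")  # enables the NCCL INFO output seen here

dist.init_process_group(backend="nccl")               # rank/world size from the launcher
torch.cuda.set_device(int(os.environ["LOCAL_RANK"]))  # one GPU per local rank
```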
+ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO ncclCommInitRank comm 0x562cc7e66580 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xf7d2f444fb81504a - Init START +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO ncclCommInitRank comm 0x55d1eb9e7b00 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xf7d2f444fb81504a - Init START +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO ncclCommInitRank comm 0x55595b6324d0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xf7d2f444fb81504a - Init START +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO ncclCommInitRank comm 0x556e17abcbe0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xf7d2f444fb81504a - Init START +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO ncclCommInitRank comm 0x5603ab116300 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xf7d2f444fb81504a - Init START +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO ncclCommInitRank comm 0x563e5e437990 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xf7d2f444fb81504a - Init START +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO ncclCommInitRank comm 0x55ff1b001fa0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xf7d2f444fb81504a - Init START +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO ncclCommInitRank comm 0x558ebdde5b80 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xf7d2f444fb81504a - Init START +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Setting affinity for GPU 6 to 
ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO comm 0x556e17abcbe0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO comm 0x5603ab116300 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO comm 0x558ebdde5b80 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO comm 0x562cc7e66580 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO comm 0x55d1eb9e7b00 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO comm 0x55595b6324d0 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO comm 0x55ff1b001fa0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO comm 0x563e5e437990 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 
5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
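The Init START block above pins each process to a NUMA-local CPU affinity mask (GPUs 0-3 and 4-7 get the two complementary masks), finds no NVLS multicast support, and then builds 24 identical rings that traverse the 8 GPUs in index order, with a chain-shaped tree (each rank's parent is rank-1) and a 512 KiB P2P chunk size. The ring neighbours implied by "Channel k/24 : 0 1 2 3 4 5 6 7" reduce to modular arithmetic, as in this sketch:

```python
# Ring order taken from the "Channel k/24 : 0 1 2 3 4 5 6 7" lines above.
world_size = 8
for rank in range(world_size):
    send_to = (rank + 1) % world_size    # rank 7 wraps around to 0, closing the ring
    recv_from = (rank - 1) % world_size
    print(f"rank {rank}: recv from {recv_from}, send to {send_to}")
```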
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL 
INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
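Each "via P2P/CUMEM/read" line means the two GPUs exchange data through direct CUDA peer mappings rather than staging through host memory. Whether a given device pair supports this can be checked directly; a one-line sketch (two visible GPUs assumed):

```python
import torch

# True for the GPU pairs that NCCL connects "via P2P/CUMEM/read" above.
print(torch.cuda.can_device_access_peer(0, 1))
```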
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL 
INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL 
INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL 
INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL 
INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO Connected all trees
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer
+ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1197335:1198807 [0] NCCL INFO ncclCommInitRank comm 0x55ff1b001fa0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xf7d2f444fb81504a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1197337:1198810 [2] NCCL INFO ncclCommInitRank comm 0x55d1eb9e7b00 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xf7d2f444fb81504a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1197341:1198808 [6] NCCL INFO ncclCommInitRank comm 0x5603ab116300 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xf7d2f444fb81504a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1197340:1198813 [5] NCCL INFO ncclCommInitRank comm 0x556e17abcbe0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xf7d2f444fb81504a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1197342:1198812 [7] NCCL INFO ncclCommInitRank comm 0x563e5e437990 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xf7d2f444fb81504a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1197338:1198814 [3] NCCL INFO ncclCommInitRank comm 0x562cc7e66580 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xf7d2f444fb81504a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1197336:1198809 [1] NCCL INFO ncclCommInitRank comm 0x55595b6324d0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xf7d2f444fb81504a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin.
+ywang29-vrdb-test1-worker-0:1197339:1198811 [4] NCCL INFO ncclCommInitRank comm 0x558ebdde5b80 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xf7d2f444fb81504a - Init COMPLETE
+[2025-10-09 13:24:46,138] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores',
'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 
'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
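
The warning above is expected for this masktune run: every self-attention projection (q/k/v/o) and every MLP projection (gate/up/down) across the 24 transformer layers carries an extra `scores` tensor that does not exist in the pretrained checkpoint, so Transformers initializes those tensors fresh and prints the warning once per rank. Below is a minimal, hypothetical sketch of how per-weight mask scores of this kind can be attached to a linear layer; the class name, the sigmoid-with-temperature formulation, and the default values are illustrative assumptions, not code from this repository.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Module):
    """Illustrative sketch only: a linear layer gated by learnable per-weight
    `scores`, mirroring the `*.scores` parameters reported as newly
    initialized in the warning above."""

    def __init__(self, base: nn.Linear, init_mean: float = 1.0, temperature: float = 0.3):
        super().__init__()
        self.base = base
        self.temperature = temperature
        # One learnable score per weight entry; such tensors are absent from
        # the pretrained checkpoint, hence freshly initialized at load time.
        self.scores = nn.Parameter(torch.full_like(base.weight, init_mean))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Soft mask in (0, 1); a lower temperature pushes values toward 0/1.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.base.weight * mask, self.base.bias)
```

Wrapping a projection such as `self_attn.q_proj` in a module like this yields parameter names ending in `q_proj.scores`, matching the pattern listed in the warning.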
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 
'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 
'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 
'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 
'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 
'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 
'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 
'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 
'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[the identical "Some weights of Qwen2ForCausalLM were not initialized ... and are newly initialized" warning, including its full tensor list, is printed once per rank; the verbatim repeats are omitted here]
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model   [printed once per rank, 8 ranks]
+[2025-10-09 13:24:50,589] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower   [printed once per rank, 8 ranks]
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...   [printed once per rank, 8 ranks]
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
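The SupermaskLinearSparsity_SoftForward_Normal modules printed above are the heart of this masktune run: they stand in for nn.Linear inside the LLM and the connector, keep the pretrained weight frozen, and carry a same-shaped trainable `scores` tensor that soft-gates each weight. The following is a minimal sketch under stated assumptions: the class name, the sigmoid gating, and the constant init are mine; the temperature 0.3 and the Mean=3.0 score init are taken from this log, and the run's actual class may differ in detail.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    # Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal.
    def __init__(self, in_features, out_features, bias=True, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One trainable score per weight entry; the weights themselves stay frozen,
        # so only the scores appear in the trainable-parameter list further below.
        self.scores = nn.Parameter(torch.full_like(self.weight, 3.0))
        self.weight.requires_grad = False

    def forward(self, x):
        # "Soft forward": scale every frozen weight by sigmoid(score / T) in (0, 1).
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)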
+Pre-training init language_model.model.layers.{0-23}.{self_attn.q_proj, k_proj, v_proj, o_proj | mlp.gate_proj, up_proj, down_proj}.scores: Mean=3.000000   [the log prints one line per tensor; all 168 language-model score tensors report the same mean]
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
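The init report above is condensed; the original log emits one "Pre-training init" line per score tensor. A loop of this shape reproduces it (a sketch, not the project's actual logging code; `model` is assumed to be the loaded TinyLlavaForConditionalGeneration):

def report_score_means(model):
    # Mirrors the "Pre-training init <name>: Mean=<value>" lines in this log.
    for name, param in model.named_parameters():
        if name.endswith(".scores"):
            print(f"Pre-training init {name}: Mean={param.mean().item():.6f}")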
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)   [printed once per rank, 8 ranks]
+2025-10-09 13:25:03,633 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
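The sampled count above is exactly int(0.1 * 665298) = 66529, i.e. --train_data_ratio 0.1 applied to the 665298-sample llava_v1_5_mix665k list, and every rank reports the same count because the draw is seeded (--seed 42). A minimal sketch of such seeded ratio subsampling (the helper name and the use of random.Random are assumptions):

import random

def subsample(data, ratio=0.1, seed=42):
    # Seeded so every rank selects the identical subset.
    k = int(len(data) * ratio)  # int(0.1 * 665298) == 66529
    return random.Random(seed).sample(data, k)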
+2025-10-09 13:25:03,638 | INFO: Trainable Parameters:
+language_model.model.layers.{0-23}.self_attn.q_proj.scores: 802816 parameters (per layer)
+language_model.model.layers.{0-23}.self_attn.k_proj.scores: 114688 parameters (per layer)
+language_model.model.layers.{0-23}.self_attn.v_proj.scores: 114688 parameters (per layer)
+language_model.model.layers.{0-23}.self_attn.o_proj.scores: 802816 parameters (per layer)
+language_model.model.layers.{0-23}.mlp.gate_proj.scores: 4358144 parameters (per layer)
+language_model.model.layers.{0-23}.mlp.up_proj.scores: 4358144 parameters (per layer)
+language_model.model.layers.{0-23}.mlp.down_proj.scores: 4358144 parameters (per layer)
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
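Each count above is just the weight shape of the corresponding Linear in the architecture printout (one score per weight entry), and the counts sum to the reported total. A quick arithmetic check:

q_o = 896 * 896     # q_proj / o_proj scores  ->  802816
k_v = 128 * 896     # k_proj / v_proj scores  ->  114688
mlp = 4864 * 896    # gate/up/down_proj scores -> 4358144
per_layer = 2 * q_o + 2 * k_v + 3 * mlp        # 14909440
connector = 1152 * 896 + 896 * 896             # 1032192 + 802816
print(24 * per_layer + connector)              # 359661568 == Total Trainable Parameters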
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+  0%|          | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
[NCCL announces the same 0 1 2 3 4 5 6 7 ring order for every channel, 01/24 through 23/24; the repeated Channel lines are omitted]
+ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 ... [23] 1/-1/-1->0->-1
[each rank r likewise reports Trees with child r+1 and parent r-1, identical across all 24 channels; rank 0 is the root (parent -1) and rank 7 the leaf (child -1)]
+ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO P2P Chunksize set to 524288
[the P2P Chunksize line is printed by all 8 ranks]
+ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
[each rank r then connects to rank (r+1) mod 8 on every channel via P2P/CUMEM/read, with 7[7] -> 0[0] closing the ring; the remaining per-channel connection lines are omitted]
+ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO
Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO 
Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL 
INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL 
INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1197339:1205863 [4] NCCL INFO ncclCommInitRank comm 0x7f6f7406a8f0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x6d511ef4c6b32b6f - Init COMPLETE +ywang29-vrdb-test1-worker-0:1197341:1205858 [6] NCCL INFO ncclCommInitRank comm 0x7fd3c406af40 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x6d511ef4c6b32b6f - Init COMPLETE +ywang29-vrdb-test1-worker-0:1197337:1205862 [2] NCCL INFO ncclCommInitRank comm 0x7f579406ac90 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x6d511ef4c6b32b6f - Init COMPLETE +ywang29-vrdb-test1-worker-0:1197335:1205857 [0] NCCL INFO ncclCommInitRank comm 0x7f81b806b190 rank 0 nranks 
8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x6d511ef4c6b32b6f - Init COMPLETE +ywang29-vrdb-test1-worker-0:1197342:1205861 [7] NCCL INFO ncclCommInitRank comm 0x7fc7c806ae00 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x6d511ef4c6b32b6f - Init COMPLETE +ywang29-vrdb-test1-worker-0:1197340:1205864 [5] NCCL INFO ncclCommInitRank comm 0x7fc80806ac30 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x6d511ef4c6b32b6f - Init COMPLETE +ywang29-vrdb-test1-worker-0:1197336:1205860 [1] NCCL INFO ncclCommInitRank comm 0x7fc93406a970 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x6d511ef4c6b32b6f - Init COMPLETE +ywang29-vrdb-test1-worker-0:1197338:1205859 [3] NCCL INFO ncclCommInitRank comm 0x7ff15006adf0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x6d511ef4c6b32b6f - Init COMPLETE + 0%| | 1/520 [00:12<1:48:50, 12.58s/it] {'loss': 2.0453, 'grad_norm': 0.004834571526944156, 'learning_rate': 0.001875, 'epoch': 0.0} + 0%| | 1/520 [00:12<1:48:50, 12.58s/it] 0%| | 2/520 [00:16<1:03:10, 7.32s/it] {'loss': 2.0549, 'grad_norm': 0.005248640642907599, 'learning_rate': 0.00375, 'epoch': 0.0} + 0%| | 2/520 [00:16<1:03:10, 7.32s/it] 1%| | 3/520 [00:19<48:38, 5.64s/it] {'loss': 2.1899, 'grad_norm': 0.0060070136342471, 'learning_rate': 0.005625, 'epoch': 0.01} + 1%| | 3/520 [00:19<48:38, 5.64s/it] 1%| | 4/520 [00:23<41:41, 4.85s/it] {'loss': 2.0656, 'grad_norm': 0.004963133945967213, 'learning_rate': 0.0075, 'epoch': 0.01} + 1%| | 4/520 [00:23<41:41, 4.85s/it] 1%| | 5/520 [00:27<37:59, 4.43s/it] {'loss': 2.2333, 'grad_norm': 0.005481431360595541, 'learning_rate': 0.009375, 'epoch': 0.01} + 1%| | 5/520 [00:27<37:59, 4.43s/it] 1%| | 6/520 [00:30<35:39, 4.16s/it] {'loss': 1.6754, 'grad_norm': 0.0028034929223959403, 'learning_rate': 0.01125, 'epoch': 0.01} + 1%| | 6/520 [00:30<35:39, 4.16s/it] 1%|▏ | 7/520 [00:34<34:09, 4.00s/it] {'loss': 2.0776, 'grad_norm': 0.005415323870495969, 'learning_rate': 0.013125, 'epoch': 0.01} + 1%|▏ | 7/520 [00:34<34:09, 4.00s/it] 2%|▏ | 8/520 [00:38<34:58, 4.10s/it] {'loss': 2.0541, 'grad_norm': 0.004572346969682921, 'learning_rate': 0.015, 'epoch': 0.02} + 2%|▏ | 8/520 [00:38<34:58, 4.10s/it] 2%|▏ | 9/520 [00:43<35:39, 4.19s/it] {'loss': 2.19, 'grad_norm': 0.00501804847232987, 'learning_rate': 0.016875, 'epoch': 0.02} + 2%|▏ | 9/520 [00:43<35:39, 4.19s/it] 2%|▏ | 10/520 [00:47<34:41, 4.08s/it] {'loss': 2.0841, 'grad_norm': 0.005625882280693355, 'learning_rate': 0.01875, 'epoch': 0.02} + 2%|▏ | 10/520 [00:47<34:41, 4.08s/it] \ No newline at end of file diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251009_054857.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251009_054857.log new file mode 100644 index 0000000000000000000000000000000000000000..7e2a27c16459a79e8e56035dfc0f44be7fab9c56 --- /dev/null +++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251009_054857.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251009_054857.log +Timestamp: 2025-10-09 05:48:57 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. 
If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-09 05:48:59,794] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:49:02,685] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 05:49:02,687] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 5 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 5 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 05:49:05,302] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 05:49:06,331] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 05:49:06,331] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 05:49:06,331] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 05:49:06,331] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 05:49:06,331] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 05:49:06,331] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 05:49:06,331] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 05:49:06,333] [INFO] [launch.py:253:main] process 829577 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:49:06,336] [INFO] [launch.py:253:main] process 829578 spawned with command: 
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:49:06,338] [INFO] [launch.py:253:main] process 829579 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', 
'/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:49:06,340] [INFO] [launch.py:253:main] process 829580 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', 
'--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:49:06,342] [INFO] [launch.py:253:main] process 829581 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:49:06,344] [INFO] [launch.py:253:main] process 829582 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', 
'/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:49:06,346] [INFO] [launch.py:253:main] process 829583 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', 
'1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 05:49:06,348] [INFO] [launch.py:253:main] process 829584 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '5', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', 
'--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '5', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+[2025-10-09 05:49:12,932] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[... the same real_accelerator message from the remaining ranks (05:49:13,234-05:49:13,321) elided ...]
+[2025-10-09 05:49:13,338] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 05:49:13,640] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[... further init_distributed "cdb=None" messages from the remaining ranks (05:49:13,633-05:49:13,737) elided ...]
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+[... the "Apply masks" line and the resume_download FutureWarning repeat once per rank; duplicate copies elided ...]
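The "Apply masks" lines above confirm that masks are attached to the LLM and connector only. As an illustration of what the logged mask hyperparameters (mask_type soft, init_mean 3.0, temperature 0.5) imply, a common soft-mask parameterization passes a learnable score through a temperature-scaled sigmoid; this sketch is an assumption about the general technique, not TinyLLaVA's actual masking code:

```python
import torch

# Assumed parameterization, not the repo's implementation: one learnable score per
# masked unit, squashed by a temperature-scaled sigmoid into a soft gate in (0, 1).
init_mean = 3.0    # --init_mean_text / --init_mean_connector in this run
temperature = 0.5  # --temperature_attn_text / --temperature_mlp_text / --temperature_connector

scores = torch.full((4864,), init_mean, requires_grad=True)  # 4864 = Qwen2.5-0.5B MLP width
gate = torch.sigmoid(scores / temperature)  # sigmoid(3.0 / 0.5) ~ 0.998: starts almost fully open

hidden = torch.randn(4864)
masked_hidden = gate * hidden  # mask applied multiplicatively; gradients flow back to `scores`
```

With init_mean 3.0 and temperature 0.5 the gates start at sigmoid(6) ≈ 0.998, so training begins from an essentially unmasked network, and the large mask learning rate is what moves the gates away from 1.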
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+[... the special-tokens notice, TypedStorage UserWarning and Flash Attention notice repeat once per rank; further copies elided ...]
+ywang29-vrdb-test1-worker-0:829577:829577 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:829577:829577 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:829577:829577 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:829577:829577 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:829577:829577 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:829577:829577 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+[... identical bootstrap and plugin-probe messages from ranks 1-7 (pids 829578-829584) elided ...]
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO Using network Socket
+[... the same NET/IB and NET/Socket probe results from ranks 1-7 elided ...]
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO ncclCommInitRank comm 0x559993219430 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc4749f05c08e766a - Init START
+[... ncclCommInitRank "Init START" lines for ranks 1-7, per-GPU CPU-affinity masks, and "NVLS multicast support is not available" notices for devs 0-7 elided ...]
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO comm 0x559993219430 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+[... matching "nRanks 8 nNodes 1 localRanks 8" communicator lines for ranks 1-7 elided ...]
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+[... Channels 01/24 through 23/24, all with the same ring order 0-7, elided ...]
+[... per-rank tree topologies ("Trees [0] 3/-1/-1->2->1 ...") and "P2P Chunksize set to 524288" lines elided ...]
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
+[... forward-ring P2P/CUMEM/read connections for every rank pair (0->1 ... 7->0) on all 24 channels elided ...]
+ywang29-vrdb-test1-worker-0:829579:831153 [2] NCCL INFO Connected all rings
+[... "Connected all rings" from the remaining ranks, followed by the reverse-direction P2P/CUMEM/read connections (7->6, 3->2, 2->1, 1->0, 6->5, 5->4) on all 24 channels, elided ...]
+ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829579:831153 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829579:831153 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829579:831153 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829584:831154 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829584:831154 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829584:831154 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829584:831154 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or 
directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:829584:831154 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:829584:831154 [7] NCCL INFO ncclCommInitRank comm 0x55de8aacd4a0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc4749f05c08e766a - Init COMPLETE +ywang29-vrdb-test1-worker-0:829581:831151 [4] NCCL INFO ncclCommInitRank comm 0x55eaa80a2410 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc4749f05c08e766a - Init COMPLETE +ywang29-vrdb-test1-worker-0:829583:831152 [6] NCCL INFO ncclCommInitRank comm 0x561e5ef28480 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc4749f05c08e766a - Init COMPLETE +ywang29-vrdb-test1-worker-0:829582:831150 [5] NCCL INFO ncclCommInitRank comm 0x55f57c5a3440 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc4749f05c08e766a - Init COMPLETE +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:829579:831153 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:829579:831153 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:829578:831155 [1] NCCL INFO ncclCommInitRank comm 0x55dad4d8b5d0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc4749f05c08e766a - Init COMPLETE +ywang29-vrdb-test1-worker-0:829579:831153 [2] NCCL INFO ncclCommInitRank comm 0x55d7fc2b0bc0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc4749f05c08e766a - Init COMPLETE +ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:829577:831148 [0] NCCL INFO ncclCommInitRank comm 0x559993219430 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc4749f05c08e766a - Init COMPLETE
+ywang29-vrdb-test1-worker-0:829580:831149 [3] NCCL INFO ncclCommInitRank comm 0x561a00a91220 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc4749f05c08e766a - Init COMPLETE
+[2025-10-09 05:49:59,674] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores',
'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 
'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 
'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 
'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 
'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 
'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 
'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 
'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-09 05:50:09,738] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
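The `.scores` warnings above and the module dump below reflect the mask-tuning setup: every `Linear` in the LLM and the connector is replaced by a `SupermaskLinearSparsity_SoftForward_Normal` layer that keeps the pretrained weight frozen and instead trains a same-shaped tensor of mask scores. A minimal sketch of such a soft-masked layer (the class name `SoftMaskedLinear` and the `temperature`/`init_score` defaults are illustrative assumptions, not the project's actual implementation):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Hypothetical stand-in for SupermaskLinearSparsity_SoftForward_Normal:
    the pretrained weight is frozen and a same-shaped, learnable `scores`
    tensor gates it through a temperature-scaled sigmoid."""

    def __init__(self, in_features, out_features, bias=True,
                 temperature=0.3, init_score=3.0):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One learnable score per weight entry; the weight itself is frozen.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_score))
        self.weight.requires_grad_(False)

    def forward(self, x):
        soft_mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * soft_mask, self.bias)
```

With scores at 3.0 and a temperature of 0.3, sigmoid(3.0 / 0.3) ≈ 0.99995, so training would start from an effectively unmasked network, consistent with the `Pre-training init ... Mean=3.000000` lines further down. Sized this way, a 896→896 projection contributes 896 × 896 = 802,816 trainable scores, which matches the parameter report later in this log.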
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
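The `Pre-training init <name>: Mean=...` lines that follow report the mean of every score tensor before any update. Assuming modules like the `SoftMaskedLinear` sketch above, a loop along these lines (a hypothetical helper, reusing that sketch's imports) would produce the same records:

```python
def report_score_init(model: nn.Module) -> None:
    # One line per mask-score tensor, matching the log format below.
    for name, param in model.named_parameters():
        if name.endswith(".scores"):
            print(f"Pre-training init {name}: Mean={param.mean().item():.6f}")
```

The connector means below (3.000005 and 2.999970) sitting slightly off 3.0 would be expected if those scores receive a small random perturbation around the constant init.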
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init
language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
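The `Randomly sampled ...` line above corresponds to the run's `train_data_ratio` of 0.1. A minimal sketch of seeded, ratio-based subsampling (`sample_subset` is a hypothetical helper, not the project's actual function):

```python
import random

def sample_subset(total: int, ratio: float, seed: int) -> list[int]:
    # Keep a seeded random fraction of the dataset indices;
    # int(665298 * 0.1) == 66529, matching the logged count.
    rng = random.Random(seed)
    k = int(total * ratio)
    picked = rng.sample(range(total), k)
    print(f"Randomly sampled {k} training samples "
          f"({ratio * 100:.1f}% of {total} total samples)")
    return picked
```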
+2025-10-09 05:50:22,923 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-09 05:50:22,930 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores:
4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 
parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 
parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters
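Each count in the list above is the element count of the corresponding weight matrix, since there is one mask score per weight (biases are not masked). Plain arithmetic over the shapes in the module dump reproduces the logged totals:

```python
# Scores per projection, from the in/out features in the module dump.
q_or_o = 896 * 896     # 802816  (q_proj / o_proj)
k_or_v = 896 * 128     # 114688  (k_proj / v_proj)
mlp    = 896 * 4864    # 4358144 (gate_proj / up_proj / down_proj)

per_layer = 2 * q_or_o + 2 * k_or_v + 3 * mlp      # 14909440
connector = 1152 * 896 + 896 * 896                 # 1032192 + 802816

total = 24 * per_layer + connector
assert total == 359_661_568  # the logged "Total Trainable Parameters"
```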
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1
[16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] 
via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p 
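The topology block above is regular: all 24 channels share one single-node ring 0 -> 1 -> ... -> 7 -> 0 and one chain tree rooted at rank 0, with every hop reached over P2P/CUMEM reads. A toy sketch that reproduces the printed neighbor pattern (illustrative only, not NCCL's actual topology code):

```python
# Illustrative reconstruction of the ring/tree neighbors NCCL printed above
# for an 8-GPU single-node communicator. Not NCCL source code.
nranks = 8
for rank in range(nranks):
    ring_next = (rank + 1) % nranks                 # 7 -> 0 closes the ring
    parent = rank - 1 if rank > 0 else -1           # chain tree rooted at 0
    child = rank + 1 if rank < nranks - 1 else -1
    print(f"rank {rank}: ring -> {ring_next}, tree {child}/-1/-1->{rank}->{parent}")
```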
+ywang29-vrdb-test1-worker-0:829577:836140 [0] NCCL INFO ncclCommInitRank comm 0x7f770406ad40 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x6e092dbb9b8805d0 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:829578:836146 [1] NCCL INFO ncclCommInitRank comm 0x7f04e006a300 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x6e092dbb9b8805d0 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:829579:836143 [2] NCCL INFO ncclCommInitRank comm 0x7f987806a940 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x6e092dbb9b8805d0 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:829580:836145 [3] NCCL INFO ncclCommInitRank comm 0x7f099406c130 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x6e092dbb9b8805d0 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:829581:836147 [4] NCCL INFO ncclCommInitRank comm 0x7f247c06acf0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x6e092dbb9b8805d0 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:829582:836142 [5] NCCL INFO ncclCommInitRank comm 0x7f694c06aee0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x6e092dbb9b8805d0 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:829583:836144 [6] NCCL INFO ncclCommInitRank comm 0x7fd61006ae60 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x6e092dbb9b8805d0 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:829584:836141 [7] NCCL INFO ncclCommInitRank comm 0x7f45c806a300 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x6e092dbb9b8805d0 - Init COMPLETE
+  1/520 [00:12<1:47:51, 12.47s/it] {'loss': 2.0453, 'grad_norm': 0.0048341796434899385, 'learning_rate': 0.3125, 'epoch': 0.0}
+  2/520 [00:16<1:04:03, 7.42s/it] {'loss': 2.0549, 'grad_norm': 0.005249111655607626, 'learning_rate': 0.625, 'epoch': 0.0}
+  3/520 [00:20<50:00, 5.80s/it] {'loss': 1.6757, 'grad_norm': 0.0017032062556984237, 'learning_rate': 0.9375, 'epoch': 0.01}
+  4/520 [00:24<43:16, 5.03s/it] {'loss': 1.5657, 'grad_norm': 0.001602831048320955, 'learning_rate': 1.25, 'epoch': 0.01}
+  5/520 [00:27<39:34, 4.61s/it] {'loss': 1.9742, 'grad_norm': 0.009402436997543959, 'learning_rate': 1.5625, 'epoch': 0.01}
+  6/520 [00:31<37:23, 4.37s/it] {'loss': 3.5686, 'grad_norm': 0.06818734996040729, 'learning_rate': 1.875, 'epoch': 0.01}
+  7/520 [00:35<35:51, 4.19s/it] {'loss': 18.7973, 'grad_norm': 1.775179747037966, 'learning_rate': 2.1875, 'epoch': 0.01}
+  8/520 [00:40<36:39, 4.30s/it] {'loss': 23.3319, 'grad_norm': 0.5956428292825123, 'learning_rate': 2.5, 'epoch': 0.02}
+  9/520 [00:44<36:39, 4.30s/it] {'loss': 24.3171, 'grad_norm': 0.40554170885828744, 'learning_rate': 2.8125, 'epoch': 0.02}
+ 10/520 [00:48<35:29, 4.18s/it] {'loss': 24.1109, 'grad_norm': 0.25850883473946046, 'learning_rate': 3.125, 'epoch': 0.02}
+ 11/520 [00:52<34:57, 4.12s/it] {'loss': 25.5223, 'grad_norm': 0.07680808964949289, 'learning_rate': 3.4375, 'epoch': 0.02}
+ 12/520 [00:56<34:23, 4.06s/it] {'loss': 21.5525, 'grad_norm': 0.0680222530133051, 'learning_rate': 3.75, 'epoch': 0.02}
+[2025-10-09 05:51:28,958] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
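The stage3 warning above reports allocator cache flushes under memory pressure and itself names the mitigation: a synchronized `get_accelerator().empty_cache()` call so all ranks flush together. A minimal sketch of where such a call could sit; the loop, `model_engine`, `train_dataloader`, and the flush interval are assumptions for illustration, while `get_accelerator` is DeepSpeed's real accessor:

```python
from deepspeed.accelerator import get_accelerator

# Illustrative placement of the mitigation suggested by the warning above.
# model_engine: assumed deepspeed.initialize(...) engine; batch handling is schematic.
for step, batch in enumerate(train_dataloader):
    loss = model_engine(batch)
    model_engine.backward(loss)
    model_engine.step()
    if step % 100 == 0:                 # interval is arbitrary here
        get_accelerator().empty_cache()  # all ranks flush at the same point
```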
+ 13/520 [01:00<35:41, 4.22s/it] {'loss': 12.7418, 'grad_norm': 0.03157361462615068, 'learning_rate': 4.0625, 'epoch': 0.03}
+ 14/520 [01:04<34:49, 4.13s/it] {'loss': 12.0206, 'grad_norm': 0.024983776110254412, 'learning_rate': 4.375, 'epoch': 0.03}
+ 15/520 [01:08<34:15, 4.07s/it] {'loss': 12.0709, 'grad_norm': 0.006338960708773218, 'learning_rate': 4.6875, 'epoch': 0.03}
+ 16/520 [01:12<33:35, 4.00s/it] {'loss': 11.3196, 'grad_norm': 0.0048618506730995666, 'learning_rate': 5.0, 'epoch': 0.03}
+ 17/520 [01:16<33:11, 3.96s/it] {'loss': 10.1101, 'grad_norm': 0.0031964248872657884, 'learning_rate': 4.999951432328845, 'epoch': 0.03}
+ 18/520 [01:20<33:00, 3.95s/it] {'loss': 9.4542, 'grad_norm': 0.003216503440978984, 'learning_rate': 4.999805731202437, 'epoch': 0.03}
+ 19/520 [01:24<32:42, 3.92s/it] {'loss': 10.4443, 'grad_norm': 0.0018450523841369902, 'learning_rate': 4.999562902281866, 'epoch': 0.04}
+ 20/520 [01:28<32:38, 3.92s/it] {'loss': 8.9432, 'grad_norm': 0.0012656547912327908, 'learning_rate': 4.999222955002041, 'epoch': 0.04}
+ 21/520 [01:32<32:42, 3.93s/it] {'loss': 9.7196, 'grad_norm': 0.001330935938895057, 'learning_rate': 4.998785902571319, 'epoch': 0.04}
+ 22/520 [01:36<32:36, 3.93s/it] {'loss': 9.1989, 'grad_norm': 0.0010675693001763156, 'learning_rate': 4.998251761970996, 'epoch': 0.04}
+ 23/520 [01:39<32:24, 3.91s/it] {'loss': 8.9345, 'grad_norm': 0.0008172783444560883, 'learning_rate': 4.997620553954645, 'epoch': 0.04}
+ 24/520 [01:43<32:11, 3.89s/it] {'loss': 9.7514, 'grad_norm': 0.0007743392615186814, 'learning_rate': 4.996892303047305, 'epoch': 0.05}
+ 25/520 [01:47<32:05, 3.89s/it] {'loss': 8.7667, 'grad_norm': 0.0007916347655326833, 'learning_rate': 4.996067037544542, 'epoch': 0.05}
+ 26/520 [01:51<31:56, 3.88s/it] {'loss': 8.9927, 'grad_norm': 0.0005291202929320679, 'learning_rate': 4.99514478951133, 'epoch': 0.05}
+ 27/520 [01:55<31:45, 3.86s/it] {'loss': 8.7429, 'grad_norm': 0.0005956742551144676, 'learning_rate': 4.994125594780822, 'epoch': 0.05}
+ 28/520 [01:59<31:40, 3.86s/it] {'loss': 8.5043, 'grad_norm': 0.0006091885858498054, 'learning_rate': 4.99300949295295, 'epoch': 0.05}
+ 29/520 [02:03<31:34, 3.86s/it] {'loss': 8.5776, 'grad_norm': 0.00046776669542080665, 'learning_rate': 4.9917965273928875, 'epoch': 0.06}
+ 30/520 [02:06<31:32, 3.86s/it] {'loss': 9.5803, 'grad_norm': 0.00045299997933213686, 'learning_rate': 4.990486745229364, 'epoch': 0.06}
+ 31/520 [02:10<31:26, 3.86s/it] {'loss': 8.7616, 'grad_norm': 0.0004620577404285808, 'learning_rate': 4.989080197352834, 'epoch': 0.06}
+ 32/520 [02:14<31:19, 3.85s/it] {'loss': 10.3312, 'grad_norm': 0.0005639545400982373, 'learning_rate': 4.987576938413504, 'epoch': 0.06}
+ 33/520 [02:18<31:20, 3.86s/it] {'loss': 8.732, 'grad_norm': 0.00034942948968462554, 'learning_rate': 4.985977026819199, 'epoch': 0.06}
+ 34/520 [02:22<31:12, 3.85s/it] {'loss': 8.3853, 'grad_norm': 0.0003741142094097753, 'learning_rate': 4.984280524733107, 'epoch': 0.07}
+ 35/520 [02:26<31:02, 3.84s/it] {'loss': 8.6022, 'grad_norm': 0.0003913283429344886, 'learning_rate': 4.9824874980713485, 'epoch': 0.07}
+ 36/520 [02:29<30:31, 3.78s/it] {'loss': 8.907, 'grad_norm': 0.0003418020343053457, 'learning_rate': 4.98059801650043, 'epoch': 0.07}
+ 37/520 [02:33<30:30, 3.79s/it] {'loss': 9.426, 'grad_norm': 0.0003649394291849815, 'learning_rate': 4.9786121534345265, 'epoch': 0.07}
+ 38/520 [02:37<30:39, 3.82s/it] {'loss': 8.9762, 'grad_norm': 0.0003756806344038845, 'learning_rate': 4.976529986032632, 'epoch': 0.07}
+ 39/520 [02:41<30:30, 3.80s/it] {'loss': 8.5385, 'grad_norm': 0.00037233666491838594, 'learning_rate': 4.974351595195565, 'epoch': 0.07}
+ 40/520 [02:44<30:10, 3.77s/it] {'loss': 8.6752, 'grad_norm': 0.00032335223987214797, 'learning_rate': 4.9720770655628215, 'epoch': 0.08}
+ 41/520 [02:48<30:03, 3.77s/it] {'loss': 8.6124, 'grad_norm': 0.00028503950680511535, 'learning_rate': 4.9697064855092865, 'epoch': 0.08}
+ 42/520 [02:52<30:02, 3.77s/it] {'loss': 8.7082, 'grad_norm': 0.0002878988768175681, 'learning_rate': 4.9672399471418025, 'epoch': 0.08}
+ 43/520 [02:56<30:11, 3.80s/it] {'loss': 9.2057, 'grad_norm': 0.00029959625957712136, 'learning_rate': 4.964677546295589, 'epoch': 0.08}
+ 44/520 [03:00<29:59, 3.78s/it] {'loss': 9.4689, 'grad_norm': 0.0002873726732578379, 'learning_rate': 4.962019382530521, 'epoch': 0.08}
+ 45/520 [03:03<29:57, 3.78s/it] {'loss': 8.4161, 'grad_norm': 0.00027146515431226184, 'learning_rate': 4.959265559127253, 'epoch': 0.09}
+ 46/520 [03:07<29:46, 3.77s/it] {'loss': 9.6385, 'grad_norm': 0.00040185471869745484, 'learning_rate': 4.9564161830832205, 'epoch': 0.09}
+ 47/520 [03:11<29:46, 3.78s/it] {'loss': 8.679, 'grad_norm': 0.00019543871860995557, 'learning_rate': 4.953471365108469, 'epoch': 0.09}
+ 48/520 [03:15<29:49, 3.79s/it] {'loss': 8.4087, 'grad_norm': 0.0002391920487794452, 'learning_rate': 4.950431219621359, 'epoch': 0.09}
+ 49/520 [03:18<29:29, 3.76s/it] {'loss': 8.586, 'grad_norm': 0.0002212905595580715, 'learning_rate': 4.947295864744121, 'epoch': 0.09}
+ 50/520 [03:22<29:15, 3.74s/it] {'loss': 8.554, 'grad_norm': 0.0002203842631609215, 'learning_rate': 4.944065422298261, 'epoch': 0.1}
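The per-step metric dicts in this log are well-formed Python literals, so the run can be post-processed directly from the file. A hypothetical helper for that (the file path and function name are made up for illustration):

```python
import ast
import re

# Hypothetical helper: pull the per-step metric dicts out of a log like this one.
PATTERN = re.compile(r"\{'loss':.*?\}")

def parse_metrics(path: str) -> list[dict]:
    with open(path) as f:
        return [ast.literal_eval(m.group(0)) for m in PATTERN.finditer(f.read())]

metrics = parse_metrics("training.log")  # path is an assumption
losses = [m["loss"] for m in metrics]
print(len(losses), max(losses))  # e.g. the spike to ~25.5 near step 11
```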
+ 51/520 [03:26<29:18, 3.75s/it] {'loss': 8.3221, 'grad_norm': 0.0002883903632485146, 'learning_rate': 4.9407400177998335, 'epoch': 0.1}
+ 52/520 [03:30<29:06, 3.73s/it] {'loss': 8.6488, 'grad_norm': 0.00016454051235059218, 'learning_rate': 4.937319780454558, 'epoch': 0.1}
+ 53/520 [03:33<29:12, 3.75s/it] {'loss': 8.8235, 'grad_norm': 0.00020146440236490606, 'learning_rate': 4.933804843152808, 'epoch': 0.1}
+ 54/520 [03:37<29:13, 3.76s/it] {'loss': 8.4219, 'grad_norm': 0.00026439340052617725, 'learning_rate': 4.930195342464437, 'epoch': 0.1}
+ 55/520 [03:41<29:09, 3.76s/it] {'loss': 8.2821, 'grad_norm': 0.00021816102828322095, 'learning_rate': 4.926491418633478, 'epoch': 0.11}
+ 56/520 [03:45<28:54, 3.74s/it] {'loss': 8.7006, 'grad_norm': 0.000164066651828978, 'learning_rate': 4.922693215572695, 'epoch': 0.11}
+ 57/520 [03:48<29:06, 3.77s/it] {'loss': 8.4313, 'grad_norm': 0.0001788334122123508, 'learning_rate': 4.918800880857991, 'epoch': 0.11}
+ 58/520 [03:52<29:16, 3.80s/it] {'loss': 8.7435, 'grad_norm': 0.000205895068782763, 'learning_rate': 4.91481456572267, 'epoch': 0.11}
+ 59/520 [03:56<29:22, 3.82s/it] {'loss': 9.1369, 'grad_norm': 0.0001597559451316532, 'learning_rate': 4.91073442505157, 'epoch': 0.11}
+ 60/520 [04:00<29:27, 3.84s/it] {'loss': 8.581, 'grad_norm': 0.00014023459851045012, 'learning_rate': 4.90656061737503, 'epoch': 0.12}
+ 61/520 [04:04<29:26, 3.85s/it] {'loss': 9.4633, 'grad_norm': 0.00019695758404185995, 'learning_rate': 4.9022933048627495, 'epoch': 0.12}
+ 62/520 [04:08<29:25, 3.86s/it] {'loss': 8.5631, 'grad_norm': 0.00013979644831105234, 'learning_rate': 4.897932653317469, 'epoch': 0.12}
+ 63/520 [04:12<29:22, 3.86s/it] {'loss': 8.6348, 'grad_norm': 0.00017476967336121612, 'learning_rate': 4.893478832168546, 'epoch': 0.12}
+ 64/520 [04:16<29:18, 3.86s/it] {'loss': 8.6848, 'grad_norm': 0.0001897301869165349, 'learning_rate': 4.888932014465352, 'epoch': 0.12}
+ 65/520 [04:19<29:22, 3.87s/it] {'loss': 8.4939, 'grad_norm': 0.00014992526779218767, 'learning_rate': 4.884292376870567, 'epoch': 0.12}
+ 66/520 [04:23<29:19, 3.88s/it] {'loss': 8.7212, 'grad_norm': 0.0001325213011103852, 'learning_rate': 4.879560099653306, 'epoch': 0.13}
+ 67/520 [04:27<29:20, 3.89s/it] {'loss': 8.2883, 'grad_norm': 0.00017145626110210335, 'learning_rate': 4.874735366682115, 'epoch': 0.13}
+ 68/520 [04:31<29:18, 3.89s/it] {'loss': 8.4151, 'grad_norm': 0.0001521626571668717, 'learning_rate': 4.86981836541783, 'epoch': 0.13}
+ 69/520 [04:35<29:15, 3.89s/it] {'loss': 8.2725, 'grad_norm': 0.0001588995571100234, 'learning_rate': 4.86480928690629, 'epoch': 0.13}
+ 70/520 [04:39<29:10, 3.89s/it] {'loss': 8.4149, 'grad_norm': 0.00011787444653889323, 'learning_rate': 4.859708325770919, 'epoch': 0.13}
+ 71/520 [04:43<29:02, 3.88s/it] {'loss': 8.4331, 'grad_norm': 0.0001448052134902369, 'learning_rate': 4.854515680205159, 'epoch': 0.14}
+ 72/520 [04:47<28:57, 3.88s/it] {'loss': 8.5417, 'grad_norm': 0.00012446491679627246, 'learning_rate': 4.849231551964771, 'epoch': 0.14}
+ 73/520 [04:51<28:59, 3.89s/it] {'loss': 8.277, 'grad_norm': 0.00015553756257601764, 'learning_rate': 4.8438561463599985, 'epoch': 0.14}
+ 74/520 [04:54<28:56, 3.89s/it] {'loss': 8.504, 'grad_norm': 0.00011493040751900151, 'learning_rate': 4.838389672247585, 'epoch': 0.14}
+ 75/520 [04:58<28:53, 3.90s/it] {'loss': 8.2919, 'grad_norm': 0.00015741673786176107, 'learning_rate': 4.832832342022666, 'epoch': 0.14}
+ 76/520 [05:02<28:50, 3.90s/it] {'loss': 9.4445, 'grad_norm': 0.0001647915784663021, 'learning_rate': 4.82718437161051, 'epoch': 0.15}
+ 77/520 [05:06<28:42, 3.89s/it] {'loss': 8.1323, 'grad_norm': 0.0001642809216104687, 'learning_rate': 4.821445980458134, 'epoch': 0.15}
+ 78/520 [05:10<28:36, 3.88s/it] {'loss': 8.4566, 'grad_norm': 0.00013699650866924015, 'learning_rate': 4.815617391525771, 'epoch': 0.15}
+ 79/520 [05:14<28:34, 3.89s/it] {'loss': 8.5221, 'grad_norm': 0.00012522029947278855, 'learning_rate': 4.809698831278217, 'epoch': 0.15}
+ 80/520 [05:18<28:08, 3.84s/it] {'loss': 9.4514, 'grad_norm': 0.00017689229855073483, 'learning_rate': 4.803690529676019, 'epoch': 0.15}
+ 81/520 [05:21<28:08, 3.85s/it] {'loss': 8.7625, 'grad_norm': 0.0002474346382362699, 'learning_rate': 4.797592720166551, 'epoch': 0.16}
+ 82/520 [05:25<27:55, 3.82s/it] {'loss': 8.4362, 'grad_norm': 0.0003967283077792939, 'learning_rate': 4.791405639674941, 'epoch': 0.16}
+ 83/520 [05:29<27:52, 3.83s/it] {'loss': 8.4982, 'grad_norm': 0.0006791981346396387, 'learning_rate': 4.785129528594858, 'epoch': 0.16}
+ 84/520 [05:33<27:27, 3.78s/it] {'loss': 8.5837, 'grad_norm': 0.0001438230275930527, 'learning_rate': 4.778764630779183, 'epoch': 0.16}
+ 85/520 [05:37<27:22, 3.78s/it] {'loss': 8.5238, 'grad_norm': 0.00011660709944600811, 'learning_rate': 4.772311193530527, 'epoch': 0.16}
+ 86/520 [05:40<27:25, 3.79s/it] {'loss': 8.8149, 'grad_norm': 0.00013030759981325502, 'learning_rate': 4.765769467591625, 'epoch': 0.17}
+ 87/520 [05:44<27:23, 3.79s/it] {'loss': 9.3124, 'grad_norm': 0.00014356974049854973, 'learning_rate': 4.759139707135592, 'epoch': 0.17}
+ 88/520 [05:48<27:25, 3.81s/it] {'loss': 9.9653, 'grad_norm': 0.00018411041661237253, 'learning_rate': 4.752422169756048, 'epoch': 0.17}
+ 89/520 [05:52<27:22, 3.81s/it] {'loss': 8.4377, 'grad_norm': 0.00011889565145138874, 'learning_rate': 4.74561711645711, 'epoch': 0.17}
+ 90/520 [05:56<27:26, 3.83s/it] {'loss': 8.3101, 'grad_norm': 0.00011355961542790204, 'learning_rate': 4.7387248116432525, 'epoch': 0.17}
+ 91/520 [05:59<27:21, 3.83s/it] {'loss': 8.581, 'grad_norm': 0.00010714049186613091, 'learning_rate': 4.731745523109029, 'epoch': 0.17}
+ 92/520 [06:03<27:06, 3.80s/it] {'loss': 8.4913, 'grad_norm': 0.00011917782081208039, 'learning_rate': 4.724679522028672, 'epoch': 0.18}
+ 93/520 [06:07<26:52, 3.78s/it] {'loss': 8.3001, 'grad_norm': 0.00011236228572241507, 'learning_rate': 4.717527082945554, 'epoch': 0.18}
+ 94/520 [06:11<26:32, 3.74s/it] {'loss': 8.7004, 'grad_norm': 0.00010222669136062067, 'learning_rate': 4.710288483761524, 'epoch': 0.18}
+ 95/520 [06:14<26:37, 3.76s/it] {'loss': 8.4198, 'grad_norm': 9.43259895727513e-05, 'learning_rate': 4.7029640057261055, 'epoch': 0.18}
+ 96/520 [06:18<26:44, 3.78s/it] {'loss': 8.3284, 'grad_norm': 0.0001022640688410792, 'learning_rate': 4.695553933425572, 'epoch': 0.18}
+ 97/520 [06:22<26:22, 3.74s/it] {'loss': 8.2537, 'grad_norm': 0.00014548903647968058, 'learning_rate': 4.688058554771884, 'epoch': 0.19}
+ 98/520 [06:26<26:17, 3.74s/it] {'loss': 8.4145, 'grad_norm': 0.00012556259441982604, 'learning_rate': 4.680478160991513, 'epoch': 0.19}
+ 99/520 [06:29<26:07, 3.72s/it] {'loss': 8.4693, 'grad_norm': 9.63930467938439e-05, 'learning_rate': 4.672813046614116, 'epoch': 0.19}
+100/520 [06:33<25:53, 3.70s/it] {'loss': 8.9748, 'grad_norm': 0.00011387492029259776, 'learning_rate': 4.665063509461097, 'epoch': 0.19}
+101/520 [06:37<25:51, 3.70s/it] {'loss': 8.5195, 'grad_norm': 9.602277802304861e-05, 'learning_rate': 4.657229850634033, 'epoch': 0.19}
+102/520 [06:40<25:40, 3.68s/it] {'loss': 8.2216, 'grad_norm': 0.00016097491785146178, 'learning_rate': 4.649312374502975, 'epoch': 0.2}
+103/520 [06:44<25:33, 3.68s/it] {'loss': 8.2665, 'grad_norm': 0.00012181076123257078, 'learning_rate': 4.6413113886946284, 'epoch': 0.2}
+104/520 [06:48<25:26, 3.67s/it] {'loss': 8.4963, 'grad_norm': 0.00012585968431649182, 'learning_rate': 4.633227204080389, 'epoch': 0.2}
+105/520 [06:51<25:20, 3.66s/it] {'loss': 8.4669, 'grad_norm': 0.00010290395071225025, 'learning_rate': 4.625060134764273, 'epoch': 0.2}
+106/520 [06:55<25:15, 3.66s/it] {'loss': 9.2564, 'grad_norm': 0.000117922590460227, 'learning_rate': 4.61681049807071, 'epoch': 0.2}
+107/520 [06:59<25:13, 3.66s/it] {'loss': 9.3104, 'grad_norm': 0.00011267170694005591, 'learning_rate': 4.608478614532214, 'epoch': 0.21}
+108/520 [07:02<25:08, 3.66s/it] {'loss': 8.3381, 'grad_norm': 9.405897550851122e-05, 'learning_rate': 4.6000648078769295, 'epoch': 0.21}
+109/520 [07:06<25:06, 3.67s/it] {'loss': 9.1166, 'grad_norm': 0.00011995886839385572, 'learning_rate': 4.591569405016049, 'epoch': 0.21}
+110/520 [07:10<25:29, 3.73s/it] {'loss': 8.812, 'grad_norm': 0.00010285531641917163, 'learning_rate': 4.582992736031122, 'epoch': 0.21}
+111/520 [07:14<25:38, 3.76s/it] {'loss': 8.7129, 'grad_norm': 0.00010374601679564107, 'learning_rate': 4.574335134161219, 'epoch': 0.21}
+112/520 [07:17<25:40, 3.78s/it] {'loss': 8.7365, 'grad_norm': 0.00010665200533164023, 'learning_rate': 4.565596935789987, 'epoch': 0.22}
+113/520 [07:21<25:24, 3.75s/it] {'loss': 8.3174, 'grad_norm': 9.842442148813887e-05, 'learning_rate': 4.556778480432583, 'epoch': 0.22}
+114/520 [07:25<25:21, 3.75s/it] {'loss': 8.6626, 'grad_norm': 0.0001315805798213551, 'learning_rate': 4.547880110722479, 'epoch': 0.22}
+115/520 [07:29<25:12, 3.73s/it] {'loss': 8.7734, 'grad_norm': 0.00016744499272265329, 'learning_rate': 4.53890217239815, 'epoch': 0.22}
+116/520 [07:32<25:01, 3.72s/it] {'loss': 8.6443, 'grad_norm': 0.00011618584046617884, 'learning_rate': 4.529845014289641, 'epoch': 0.22}
+117/520 [07:36<24:49, 3.70s/it] {'loss': 8.7314, 'grad_norm': 0.00010598675187673206, 'learning_rate': 4.520708988305014, 'epoch': 0.23}
+118/520 [07:40<24:43, 3.69s/it] {'loss': 8.3541, 'grad_norm': 0.00010077260906571849, 'learning_rate': 4.511494449416671, 'epoch': 0.23}
+119/520 [07:43<24:43, 3.70s/it] {'loss': 8.3066, 'grad_norm': 0.00011617828094105832, 'learning_rate': 4.502201755647571, 'epoch': 0.23}
+120/520 [07:47<24:42, 3.71s/it] {'loss': 8.1942, 'grad_norm': 0.00011256501901057036, 'learning_rate': 4.492831268057306, 'epoch': 0.23}
+121/520 [07:51<24:31, 3.69s/it] {'loss': 8.4513, 'grad_norm': 0.00010088033642619055, 'learning_rate': 4.483383350728088, 'epoch': 0.23}
+122/520 [07:54<24:24, 3.68s/it] {'loss': 8.3281, 'grad_norm': 9.724451116200012e-05, 'learning_rate': 4.473858370750588, 'epoch': 0.23}
+123/520 [07:58<24:27, 3.70s/it] {'loss': 9.4977, 'grad_norm': 0.0001156876519784975, 'learning_rate': 4.4642566982096845, 'epoch': 0.24}
+124/520 [08:02<24:22, 3.69s/it] {'loss': 8.5418, 'grad_norm': 0.00010305874291144292, 'learning_rate': 4.454578706170075, 'epoch': 0.24}
+125/520 [08:05<24:21, 3.70s/it] {'loss': 8.3719, 'grad_norm': 8.421039562644454e-05, 'learning_rate': 4.444824770661787, 'epoch': 0.24}
+126/520 [08:10<25:38, 3.90s/it] {'loss': 8.8164, 'grad_norm': 8.426572057551945e-05, 'learning_rate': 4.434995270665569, 'epoch': 0.24}
+ 24%|██▍ | 126/520
[08:10<25:38, 3.90s/it] 24%|██▍ | 127/520 [08:14<25:10, 3.84s/it] {'loss': 8.343, 'grad_norm': 0.0001032305286221475, 'learning_rate': 4.425090588098158, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:14<25:10, 3.84s/it] 25%|██▍ | 128/520 [08:17<24:54, 3.81s/it] {'loss': 8.3817, 'grad_norm': 9.22391965587617e-05, 'learning_rate': 4.415111107797445, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:17<24:54, 3.81s/it] 25%|██▍ | 129/520 [08:21<24:37, 3.78s/it] {'loss': 8.354, 'grad_norm': 0.00010760078384199779, 'learning_rate': 4.405057217507527, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:21<24:37, 3.78s/it] 25%|██▌ | 130/520 [08:25<24:20, 3.74s/it] {'loss': 8.6516, 'grad_norm': 8.079373577778211e-05, 'learning_rate': 4.394929307863633, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:25<24:20, 3.74s/it] 25%|██▌ | 131/520 [08:28<24:14, 3.74s/it] {'loss': 9.157, 'grad_norm': 9.200343188295809e-05, 'learning_rate': 4.38472777237695, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:28<24:14, 3.74s/it] 25%|██▌ | 132/520 [08:32<24:03, 3.72s/it] {'loss': 8.4372, 'grad_norm': 8.442102926366954e-05, 'learning_rate': 4.374453007419335, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:32<24:03, 3.72s/it] 26%|██▌ | 133/520 [08:36<23:57, 3.71s/it] {'loss': 8.2341, 'grad_norm': 9.25838801967202e-05, 'learning_rate': 4.3641054122079135, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:36<23:57, 3.71s/it] 26%|██▌ | 134/520 [08:39<23:53, 3.71s/it] {'loss': 8.3808, 'grad_norm': 8.293266195767086e-05, 'learning_rate': 4.353685388789566, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:39<23:53, 3.71s/it] 26%|██▌ | 135/520 [08:43<23:47, 3.71s/it] {'loss': 8.6478, 'grad_norm': 7.541734978347909e-05, 'learning_rate': 4.34319334202531, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:43<23:47, 3.71s/it] 26%|██▌ | 136/520 [08:47<23:40, 3.70s/it] {'loss': 8.4737, 'grad_norm': 9.383155001147735e-05, 'learning_rate': 4.332629679574565, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:47<23:40, 3.70s/it] 26%|██▋ | 137/520 [08:51<23:40, 3.71s/it] {'loss': 8.4161, 'grad_norm': 7.553672634315033e-05, 'learning_rate': 4.321994811879321, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:51<23:40, 3.71s/it] 27%|██▋ | 138/520 [08:54<23:30, 3.69s/it] {'loss': 8.3977, 'grad_norm': 8.366195859848446e-05, 'learning_rate': 4.3112891521481815, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:54<23:30, 3.69s/it] 27%|██▋ | 139/520 [08:58<23:37, 3.72s/it] {'loss': 8.8852, 'grad_norm': 6.88709584155214e-05, 'learning_rate': 4.3005131163403165, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:58<23:37, 3.72s/it] 27%|██▋ | 140/520 [09:02<23:34, 3.72s/it] {'loss': 9.2335, 'grad_norm': 7.102876739953034e-05, 'learning_rate': 4.289667123149296, 'epoch': 0.27} + 27%|██▋ | 140/520 [09:02<23:34, 3.72s/it] 27%|██▋ | 141/520 [09:06<23:34, 3.73s/it] {'loss': 8.605, 'grad_norm': 7.045614403056061e-05, 'learning_rate': 4.278751593986826, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:06<23:34, 3.73s/it] 27%|██▋ | 142/520 [09:09<23:22, 3.71s/it] {'loss': 9.3118, 'grad_norm': 8.491692586710385e-05, 'learning_rate': 4.267766952966369, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:09<23:22, 3.71s/it] 28%|██▊ | 143/520 [09:13<23:20, 3.71s/it] {'loss': 8.4409, 'grad_norm': 6.805838874069274e-05, 'learning_rate': 4.256713626886673, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:13<23:20, 3.71s/it] 28%|██▊ | 144/520 [09:17<23:14, 3.71s/it] {'loss': 8.1226, 'grad_norm': 8.534286705415746e-05, 'learning_rate': 4.245592045215182, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:17<23:14, 3.71s/it] 28%|██▊ | 145/520 [09:20<23:10, 3.71s/it] {'loss': 8.2638, 'grad_norm': 7.938191510694479e-05, 'learning_rate': 
4.234402640071354, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:20<23:10, 3.71s/it] 28%|██▊ | 146/520 [09:24<23:14, 3.73s/it] {'loss': 9.1063, 'grad_norm': 8.890062875011648e-05, 'learning_rate': 4.223145846209867, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:24<23:14, 3.73s/it] 28%|██▊ | 147/520 [09:28<23:33, 3.79s/it] {'loss': 8.2365, 'grad_norm': 7.565207836657205e-05, 'learning_rate': 4.211822101003734, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:28<23:33, 3.79s/it] 28%|██▊ | 148/520 [09:32<23:42, 3.82s/it] {'loss': 8.4566, 'grad_norm': 7.387025671986678e-05, 'learning_rate': 4.200431844427298, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:32<23:42, 3.82s/it] 29%|██▊ | 149/520 [09:36<23:49, 3.85s/it] {'loss': 8.2286, 'grad_norm': 8.093438452871323e-05, 'learning_rate': 4.18897551903915, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:36<23:49, 3.85s/it] 29%|██▉ | 150/520 [09:40<23:58, 3.89s/it] {'loss': 8.6016, 'grad_norm': 6.66288850474894e-05, 'learning_rate': 4.177453569964925, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:40<23:58, 3.89s/it] 29%|██▉ | 151/520 [09:44<23:56, 3.89s/it] {'loss': 8.2443, 'grad_norm': 9.010814814875264e-05, 'learning_rate': 4.16586644488001, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:44<23:56, 3.89s/it] 29%|██▉ | 152/520 [09:48<23:58, 3.91s/it] {'loss': 8.2663, 'grad_norm': 9.624566615200762e-05, 'learning_rate': 4.154214593992148, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:48<23:58, 3.91s/it] 29%|██▉ | 153/520 [09:52<23:53, 3.91s/it] {'loss': 8.3579, 'grad_norm': 9.322861551428702e-05, 'learning_rate': 4.142498470023951, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:52<23:53, 3.91s/it] 30%|██▉ | 154/520 [09:55<23:51, 3.91s/it] {'loss': 8.5358, 'grad_norm': 7.796405470049445e-05, 'learning_rate': 4.1307185281953025, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:55<23:51, 3.91s/it] 30%|██▉ | 155/520 [09:59<23:50, 3.92s/it] {'loss': 8.293, 'grad_norm': 8.525625745429635e-05, 'learning_rate': 4.118875226205676, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:59<23:50, 3.92s/it] 30%|███ | 156/520 [10:03<23:35, 3.89s/it] {'loss': 8.5156, 'grad_norm': 7.224614659608415e-05, 'learning_rate': 4.106969024216348, 'epoch': 0.3} + 30%|███ | 156/520 [10:03<23:35, 3.89s/it] 30%|███ | 157/520 [10:07<23:19, 3.85s/it] {'loss': 9.2311, 'grad_norm': 9.098043489776023e-05, 'learning_rate': 4.095000384832522, 'epoch': 0.3} + 30%|███ | 157/520 [10:07<23:19, 3.85s/it] 30%|███ | 158/520 [10:11<22:55, 3.80s/it] {'loss': 8.3962, 'grad_norm': 6.031981760869837e-05, 'learning_rate': 4.08296977308535, 'epoch': 0.3} + 30%|███ | 158/520 [10:11<22:55, 3.80s/it] 31%|███ | 159/520 [10:14<22:41, 3.77s/it] {'loss': 8.3434, 'grad_norm': 6.721887179220173e-05, 'learning_rate': 4.0708776564138685, 'epoch': 0.31} + 31%|███ | 159/520 [10:14<22:41, 3.77s/it] 31%|███ | 160/520 [10:18<22:31, 3.75s/it] {'loss': 8.3254, 'grad_norm': 7.618393957369107e-05, 'learning_rate': 4.058724504646834, 'epoch': 0.31} + 31%|███ | 160/520 [10:18<22:31, 3.75s/it] 31%|███ | 161/520 [10:22<22:20, 3.74s/it] {'loss': 8.4227, 'grad_norm': 5.716796868838967e-05, 'learning_rate': 4.04651078998447, 'epoch': 0.31} + 31%|███ | 161/520 [10:22<22:20, 3.74s/it] 31%|███ | 162/520 [10:25<22:11, 3.72s/it] {'loss': 9.1833, 'grad_norm': 7.926899898166643e-05, 'learning_rate': 4.034236986980119, 'epoch': 0.31} + 31%|███ | 162/520 [10:25<22:11, 3.72s/it] 31%|███▏ | 163/520 [10:29<22:11, 3.73s/it] {'loss': 8.1803, 'grad_norm': 7.855528761614054e-05, 'learning_rate': 4.0219035725218015, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:29<22:11, 3.73s/it] 32%|███▏ | 164/520 [10:33<22:02, 3.72s/it] {'loss': 8.0619, 
'grad_norm': 8.156934661389072e-05, 'learning_rate': 4.009511025813693, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:33<22:02, 3.72s/it] 32%|███▏ | 165/520 [10:37<21:55, 3.71s/it] {'loss': 8.3509, 'grad_norm': 6.122279875360161e-05, 'learning_rate': 3.997059828357501, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:37<21:55, 3.71s/it] 32%|███▏ | 166/520 [10:40<21:54, 3.71s/it] {'loss': 8.3301, 'grad_norm': 6.798922633753455e-05, 'learning_rate': 3.9845504639337537, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:40<21:54, 3.71s/it] 32%|███▏ | 167/520 [10:44<22:08, 3.76s/it] {'loss': 8.443, 'grad_norm': 6.064810571331951e-05, 'learning_rate': 3.9719834185830116, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:44<22:08, 3.76s/it] 32%|███▏ | 168/520 [10:48<22:29, 3.83s/it] {'loss': 8.3917, 'grad_norm': 8.579695410932548e-05, 'learning_rate': 3.959359180586975, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:48<22:29, 3.83s/it] 32%|███▎ | 169/520 [10:52<22:34, 3.86s/it] {'loss': 8.2729, 'grad_norm': 5.450773434786699e-05, 'learning_rate': 3.946678240449515, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:52<22:34, 3.86s/it] 33%|███▎ | 170/520 [10:56<22:39, 3.88s/it] {'loss': 8.9667, 'grad_norm': 6.0138957457895986e-05, 'learning_rate': 3.933941090877615, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:56<22:39, 3.88s/it] 33%|███▎ | 171/520 [11:00<22:45, 3.91s/it] {'loss': 8.0689, 'grad_norm': 0.00013695950772955786, 'learning_rate': 3.921148226762231, 'epoch': 0.33} + 33%|███▎ | 171/520 [11:00<22:45, 3.91s/it] 33%|███▎ | 172/520 [11:04<22:44, 3.92s/it] {'loss': 8.4806, 'grad_norm': 8.3949587704597e-05, 'learning_rate': 3.908300145159055, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:04<22:44, 3.92s/it] 33%|███▎ | 173/520 [11:08<22:31, 3.89s/it] {'loss': 8.212, 'grad_norm': 8.168781379867556e-05, 'learning_rate': 3.8953973452692106, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:08<22:31, 3.89s/it] 33%|███▎ | 174/520 [11:12<22:21, 3.88s/it] {'loss': 8.4156, 'grad_norm': 5.4561272861697994e-05, 'learning_rate': 3.8824403284198485, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:12<22:21, 3.88s/it] 34%|███▎ | 175/520 [11:16<22:20, 3.88s/it] {'loss': 8.2169, 'grad_norm': 6.831564540959798e-05, 'learning_rate': 3.8694295980446785, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:16<22:20, 3.88s/it] 34%|███▍ | 176/520 [11:19<22:19, 3.89s/it] {'loss': 9.2163, 'grad_norm': 8.563570894832769e-05, 'learning_rate': 3.8563656596643985, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:19<22:19, 3.89s/it] 34%|███▍ | 177/520 [11:23<22:14, 3.89s/it] {'loss': 8.6784, 'grad_norm': 6.164087354437824e-05, 'learning_rate': 3.84324902086706, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:23<22:14, 3.89s/it] 34%|███▍ | 178/520 [11:27<22:12, 3.90s/it] {'loss': 8.377, 'grad_norm': 6.442261437440424e-05, 'learning_rate': 3.8300801912883413, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:27<22:12, 3.90s/it] 34%|███▍ | 179/520 [11:31<22:01, 3.87s/it] {'loss': 8.4326, 'grad_norm': 8.73384864611045e-05, 'learning_rate': 3.8168596825917516, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:31<22:01, 3.87s/it] 35%|███▍ | 180/520 [11:35<21:48, 3.85s/it] {'loss': 8.4473, 'grad_norm': 5.054925295967015e-05, 'learning_rate': 3.8035880084487452, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:35<21:48, 3.85s/it] 35%|███▍ | 181/520 [11:39<21:41, 3.84s/it] {'loss': 8.4236, 'grad_norm': 7.20512555447929e-05, 'learning_rate': 3.7902656845187668, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:39<21:41, 3.84s/it] 35%|███▌ | 182/520 [11:43<21:37, 3.84s/it] {'loss': 8.3886, 'grad_norm': 4.961583220133887e-05, 'learning_rate': 3.7768932284292145, 'epoch': 0.35} + 
35%|███▌ | 182/520 [11:43<21:37, 3.84s/it] 35%|███▌ | 183/520 [11:46<21:39, 3.86s/it] {'loss': 8.3939, 'grad_norm': 6.973046900581538e-05, 'learning_rate': 3.763471159755327, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:46<21:39, 3.86s/it] 35%|███▌ | 184/520 [11:50<21:36, 3.86s/it] {'loss': 8.124, 'grad_norm': 7.54695847472807e-05, 'learning_rate': 3.75, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:50<21:36, 3.86s/it] 36%|███▌ | 185/520 [11:54<21:29, 3.85s/it] {'loss': 8.7036, 'grad_norm': 7.280279444736396e-05, 'learning_rate': 3.7364802725735187, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:54<21:29, 3.85s/it] 36%|███▌ | 186/520 [11:58<21:32, 3.87s/it] {'loss': 8.3104, 'grad_norm': 6.41429281289783e-05, 'learning_rate': 3.7229125027732235, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:58<21:32, 3.87s/it] 36%|███▌ | 187/520 [12:02<21:22, 3.85s/it] {'loss': 8.4272, 'grad_norm': 7.145593395929476e-05, 'learning_rate': 3.7092972177630994, 'epoch': 0.36} + 36%|███▌ | 187/520 [12:02<21:22, 3.85s/it] 36%|███▌ | 188/520 [12:06<21:20, 3.86s/it] {'loss': 8.2905, 'grad_norm': 7.237804440809651e-05, 'learning_rate': 3.6956349465532954, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:06<21:20, 3.86s/it] 36%|███▋ | 189/520 [12:10<21:16, 3.86s/it] {'loss': 8.5558, 'grad_norm': 7.27218367145799e-05, 'learning_rate': 3.6819262199795677, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:10<21:16, 3.86s/it] 37%|███▋ | 190/520 [12:13<21:05, 3.84s/it] {'loss': 8.2174, 'grad_norm': 6.834987690969468e-05, 'learning_rate': 3.668171570682655, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:13<21:05, 3.84s/it] 37%|███▋ | 191/520 [12:17<21:02, 3.84s/it] {'loss': 8.2168, 'grad_norm': 6.242257072648936e-05, 'learning_rate': 3.6543715330875854, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:17<21:02, 3.84s/it] 37%|███▋ | 192/520 [12:21<21:05, 3.86s/it] {'loss': 8.4503, 'grad_norm': 7.56659914091402e-05, 'learning_rate': 3.6405266433829073, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:21<21:05, 3.86s/it] 37%|███▋ | 193/520 [12:25<20:59, 3.85s/it] {'loss': 9.1125, 'grad_norm': 7.866726370968308e-05, 'learning_rate': 3.6266374394998637, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:25<20:59, 3.85s/it] 37%|███▋ | 194/520 [12:29<21:18, 3.92s/it] {'loss': 8.783, 'grad_norm': 7.617479465486137e-05, 'learning_rate': 3.6127044610914805, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:29<21:18, 3.92s/it] 38%|███▊ | 195/520 [12:33<21:14, 3.92s/it] {'loss': 8.4498, 'grad_norm': 8.299552044408895e-05, 'learning_rate': 3.5987282495116126, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:33<21:14, 3.92s/it] 38%|███▊ | 196/520 [12:37<21:03, 3.90s/it] {'loss': 8.3722, 'grad_norm': 7.459193966280997e-05, 'learning_rate': 3.584709347793895, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:37<21:03, 3.90s/it] 38%|███▊ | 197/520 [12:41<21:04, 3.91s/it] {'loss': 8.2682, 'grad_norm': 7.570693196934391e-05, 'learning_rate': 3.5706483006306566, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:41<21:04, 3.91s/it] 38%|███▊ | 198/520 [12:45<20:54, 3.89s/it] {'loss': 8.5085, 'grad_norm': 5.4357109094856516e-05, 'learning_rate': 3.5565456543517486, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:45<20:54, 3.89s/it] 38%|███▊ | 199/520 [12:49<20:56, 3.92s/it] {'loss': 8.2056, 'grad_norm': 5.894098187159341e-05, 'learning_rate': 3.5424019569033205, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:49<20:56, 3.92s/it] 38%|███▊ | 200/520 [12:52<20:49, 3.91s/it] {'loss': 9.0655, 'grad_norm': 5.510921229422029e-05, 'learning_rate': 3.5282177578265292, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:52<20:49, 3.91s/it] 39%|███▊ | 201/520 [12:56<20:53, 3.93s/it] {'loss': 8.8371, 
'grad_norm': 6.29352074707701e-05, 'learning_rate': 3.513993608236188, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:56<20:53, 3.93s/it] 39%|███▉ | 202/520 [13:00<20:44, 3.91s/it] {'loss': 8.3795, 'grad_norm': 6.792036388219076e-05, 'learning_rate': 3.499730060799352, 'epoch': 0.39} + 39%|███▉ | 202/520 [13:00<20:44, 3.91s/it] 39%|███▉ | 203/520 [13:04<20:41, 3.91s/it] {'loss': 8.4143, 'grad_norm': 6.519276176799442e-05, 'learning_rate': 3.4854276697138484, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:04<20:41, 3.91s/it] 39%|███▉ | 204/520 [13:08<20:31, 3.90s/it] {'loss': 8.6395, 'grad_norm': 5.940779394783999e-05, 'learning_rate': 3.4710869906867368, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:08<20:31, 3.90s/it] 39%|███▉ | 205/520 [13:12<20:31, 3.91s/it] {'loss': 9.1827, 'grad_norm': 8.13912257765466e-05, 'learning_rate': 3.4567085809127245, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:12<20:31, 3.91s/it] 40%|███▉ | 206/520 [13:16<20:23, 3.90s/it] {'loss': 8.5152, 'grad_norm': 6.012697118485939e-05, 'learning_rate': 3.442292999052513, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:16<20:23, 3.90s/it] 40%|███▉ | 207/520 [13:20<20:26, 3.92s/it] {'loss': 9.034, 'grad_norm': 7.72371659465181e-05, 'learning_rate': 3.427840805211095, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:20<20:26, 3.92s/it] 40%|████ | 208/520 [13:24<20:16, 3.90s/it] {'loss': 8.1594, 'grad_norm': 8.728181303743797e-05, 'learning_rate': 3.413352560915988, 'epoch': 0.4} + 40%|████ | 208/520 [13:24<20:16, 3.90s/it] 40%|████ | 209/520 [13:28<20:20, 3.92s/it] {'loss': 8.3623, 'grad_norm': 6.556608810624249e-05, 'learning_rate': 3.398828829095419, 'epoch': 0.4} + 40%|████ | 209/520 [13:28<20:20, 3.92s/it] 40%|████ | 210/520 [13:32<20:09, 3.90s/it] {'loss': 8.4161, 'grad_norm': 7.417778863945111e-05, 'learning_rate': 3.3842701740564536, 'epoch': 0.4} + 40%|████ | 210/520 [13:32<20:09, 3.90s/it] 41%|████ | 211/520 [13:35<20:09, 3.91s/it] {'loss': 8.4166, 'grad_norm': 6.533642821095226e-05, 'learning_rate': 3.3696771614630676, 'epoch': 0.41} + 41%|████ | 211/520 [13:35<20:09, 3.91s/it] 41%|████ | 212/520 [13:39<20:02, 3.91s/it] {'loss': 8.3979, 'grad_norm': 0.00010591719527261821, 'learning_rate': 3.3550503583141724, 'epoch': 0.41} + 41%|████ | 212/520 [13:39<20:02, 3.91s/it] 41%|████ | 213/520 [13:43<20:02, 3.92s/it] {'loss': 8.437, 'grad_norm': 0.00010424955154245987, 'learning_rate': 3.340390332921577, 'epoch': 0.41} + 41%|████ | 213/520 [13:43<20:02, 3.92s/it] 41%|████ | 214/520 [13:47<19:58, 3.92s/it] {'loss': 8.5034, 'grad_norm': 6.206917606083538e-05, 'learning_rate': 3.325697654887918, 'epoch': 0.41} + 41%|████ | 214/520 [13:47<19:58, 3.92s/it] 41%|████▏ | 215/520 [13:51<19:56, 3.92s/it] {'loss': 8.8257, 'grad_norm': 6.825239742495102e-05, 'learning_rate': 3.310972895084518, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:51<19:56, 3.92s/it] 42%|████▏ | 216/520 [13:55<19:55, 3.93s/it] {'loss': 8.2432, 'grad_norm': 9.218733799680507e-05, 'learning_rate': 3.2962166256292114, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:55<19:55, 3.93s/it] 42%|████▏ | 217/520 [13:59<19:54, 3.94s/it] {'loss': 8.3873, 'grad_norm': 7.510784446823701e-05, 'learning_rate': 3.2814294198641116, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:59<19:54, 3.94s/it] 42%|████▏ | 218/520 [14:03<19:48, 3.93s/it] {'loss': 8.4137, 'grad_norm': 7.222003669690232e-05, 'learning_rate': 3.266611852333336, 'epoch': 0.42} + 42%|████▏ | 218/520 [14:03<19:48, 3.93s/it] 42%|████▏ | 219/520 [14:07<19:48, 3.95s/it] {'loss': 8.2597, 'grad_norm': 0.00013860459375643962, 'learning_rate': 3.2517644987606826, 'epoch': 0.42} 
+ 42%|████▏ | 219/520 [14:07<19:48, 3.95s/it] 42%|████▏ | 220/520 [14:11<19:44, 3.95s/it] {'loss': 9.0404, 'grad_norm': 7.580745896260214e-05, 'learning_rate': 3.2368879360272604, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:11<19:44, 3.95s/it] 42%|████▎ | 221/520 [14:15<19:34, 3.93s/it] {'loss': 8.2795, 'grad_norm': 9.462619380980582e-05, 'learning_rate': 3.2219827421490748, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:15<19:34, 3.93s/it] 43%|████▎ | 222/520 [14:19<19:29, 3.92s/it] {'loss': 8.1826, 'grad_norm': 0.00013872569431281096, 'learning_rate': 3.2070494962545686, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:19<19:29, 3.92s/it] 43%|████▎ | 223/520 [14:23<19:16, 3.90s/it] {'loss': 8.1803, 'grad_norm': 9.504071295085464e-05, 'learning_rate': 3.1920887785621233, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:23<19:16, 3.90s/it] 43%|████▎ | 224/520 [14:26<19:14, 3.90s/it] {'loss': 10.1901, 'grad_norm': 0.00018420238747931386, 'learning_rate': 3.177101170357513, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:26<19:14, 3.90s/it] 43%|████▎ | 225/520 [14:30<19:08, 3.89s/it] {'loss': 8.3496, 'grad_norm': 6.738582681589039e-05, 'learning_rate': 3.162087253971318, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:30<19:08, 3.89s/it] 43%|████▎ | 226/520 [14:34<19:01, 3.88s/it] {'loss': 8.4859, 'grad_norm': 4.869417789425543e-05, 'learning_rate': 3.147047612756302, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:34<19:01, 3.88s/it] 44%|████▎ | 227/520 [14:38<18:55, 3.88s/it] {'loss': 8.3099, 'grad_norm': 6.165855353884672e-05, 'learning_rate': 3.1319828310647435, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:38<18:55, 3.88s/it] 44%|████▍ | 228/520 [14:42<18:49, 3.87s/it] {'loss': 9.3971, 'grad_norm': 9.075534507295572e-05, 'learning_rate': 3.116893494225734, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:42<18:49, 3.87s/it] 44%|████▍ | 229/520 [14:46<18:42, 3.86s/it] {'loss': 8.3975, 'grad_norm': 6.205896752901747e-05, 'learning_rate': 3.101780188522433, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:46<18:42, 3.86s/it] 44%|████▍ | 230/520 [14:50<18:38, 3.86s/it] {'loss': 8.2075, 'grad_norm': 6.75273967678728e-05, 'learning_rate': 3.0866435011692883, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:50<18:38, 3.86s/it] 44%|████▍ | 231/520 [14:53<18:35, 3.86s/it] {'loss': 8.4152, 'grad_norm': 6.415764630725678e-05, 'learning_rate': 3.071484020289224, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:53<18:35, 3.86s/it] 45%|████▍ | 232/520 [14:57<18:38, 3.88s/it] {'loss': 9.3764, 'grad_norm': 9.938595906445596e-05, 'learning_rate': 3.056302334890786, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:57<18:38, 3.88s/it] 45%|████▍ | 233/520 [15:01<18:20, 3.83s/it] {'loss': 8.9046, 'grad_norm': 7.102313432586871e-05, 'learning_rate': 3.0410990348452573, 'epoch': 0.45} + 45%|████▍ | 233/520 [15:01<18:20, 3.83s/it] 45%|████▌ | 234/520 [15:05<18:03, 3.79s/it] {'loss': 8.1453, 'grad_norm': 6.694118096157271e-05, 'learning_rate': 3.0258747108637394, 'epoch': 0.45} + 45%|████▌ | 234/520 [15:05<18:03, 3.79s/it] 45%|████▌ | 235/520 [15:08<17:52, 3.76s/it] {'loss': 8.3834, 'grad_norm': 5.143850791640236e-05, 'learning_rate': 3.010629954474201, 'epoch': 0.45} + 45%|████▌ | 235/520 [15:08<17:52, 3.76s/it] 45%|████▌ | 236/520 [15:12<17:41, 3.74s/it] {'loss': 8.7426, 'grad_norm': 5.628775270047353e-05, 'learning_rate': 2.995365357998494, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:12<17:41, 3.74s/it] 46%|████▌ | 237/520 [15:16<17:34, 3.73s/it] {'loss': 8.4705, 'grad_norm': 4.5719625936057964e-05, 'learning_rate': 2.9800815145293407, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:16<17:34, 3.73s/it] 
46%|████▌ | 238/520 [15:20<17:35, 3.74s/it] {'loss': 8.2204, 'grad_norm': 5.500526128601603e-05, 'learning_rate': 2.964779017907287, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:20<17:35, 3.74s/it] 46%|████▌ | 239/520 [15:23<17:40, 3.78s/it] {'loss': 8.5123, 'grad_norm': 4.9260956375453975e-05, 'learning_rate': 2.9494584626976317, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:24<17:40, 3.78s/it] 46%|████▌ | 240/520 [15:27<17:50, 3.82s/it] {'loss': 7.9207, 'grad_norm': 7.256432919081095e-05, 'learning_rate': 2.934120444167326, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:27<17:50, 3.82s/it] 46%|████▋ | 241/520 [15:31<17:40, 3.80s/it] {'loss': 8.1482, 'grad_norm': 4.797414841244712e-05, 'learning_rate': 2.918765558261841, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:31<17:40, 3.80s/it] 47%|████▋ | 242/520 [15:35<17:27, 3.77s/it] {'loss': 8.3019, 'grad_norm': 4.724868646614415e-05, 'learning_rate': 2.903394401582017, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:35<17:27, 3.77s/it] 47%|████▋ | 243/520 [15:39<17:18, 3.75s/it] {'loss': 8.2598, 'grad_norm': 5.8065132052502655e-05, 'learning_rate': 2.8880075713608786, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:39<17:18, 3.75s/it] 47%|████▋ | 244/520 [15:42<17:11, 3.74s/it] {'loss': 8.5875, 'grad_norm': 5.2245391709007216e-05, 'learning_rate': 2.8726056654404357, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:42<17:11, 3.74s/it] 47%|████▋ | 245/520 [15:46<17:04, 3.72s/it] {'loss': 8.2806, 'grad_norm': 5.722413077381297e-05, 'learning_rate': 2.8571892822484504, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:46<17:04, 3.72s/it] 47%|████▋ | 246/520 [15:50<16:59, 3.72s/it] {'loss': 9.2713, 'grad_norm': 6.66913774917775e-05, 'learning_rate': 2.8417590207751835, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:50<16:59, 3.72s/it] 48%|████▊ | 247/520 [15:53<16:50, 3.70s/it] {'loss': 8.6839, 'grad_norm': 6.0431602860226723e-05, 'learning_rate': 2.8263154805501296, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:53<16:50, 3.70s/it] 48%|████▊ | 248/520 [15:57<16:47, 3.71s/it] {'loss': 8.2289, 'grad_norm': 4.7169909515761663e-05, 'learning_rate': 2.810859261618713, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:57<16:47, 3.71s/it] 48%|████▊ | 249/520 [16:01<16:41, 3.70s/it] {'loss': 8.5636, 'grad_norm': 3.9786523938801004e-05, 'learning_rate': 2.7953909645189823, 'epoch': 0.48} + 48%|████▊ | 249/520 [16:01<16:41, 3.70s/it] 48%|████▊ | 250/520 [16:05<16:46, 3.73s/it] {'loss': 8.2451, 'grad_norm': 5.9326823912997914e-05, 'learning_rate': 2.77991119025827, 'epoch': 0.48} + 48%|████▊ | 250/520 [16:05<16:46, 3.73s/it] 48%|████▊ | 251/520 [16:08<16:37, 3.71s/it] {'loss': 8.5184, 'grad_norm': 3.926938583628852e-05, 'learning_rate': 2.7644205402898447, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:08<16:37, 3.71s/it] 48%|████▊ | 252/520 [16:12<16:33, 3.71s/it] {'loss': 9.0479, 'grad_norm': 5.310044353117579e-05, 'learning_rate': 2.748919616489542, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:12<16:33, 3.71s/it] 49%|████▊ | 253/520 [16:16<16:30, 3.71s/it] {'loss': 8.372, 'grad_norm': 6.0241666772200544e-05, 'learning_rate': 2.7334090211323763, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:16<16:30, 3.71s/it] 49%|████▉ | 254/520 [16:19<16:29, 3.72s/it] {'loss': 8.3606, 'grad_norm': 4.198585469618277e-05, 'learning_rate': 2.717889356869146, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:19<16:29, 3.72s/it] 49%|████▉ | 255/520 [16:23<16:21, 3.70s/it] {'loss': 8.3223, 'grad_norm': 4.238064539279552e-05, 'learning_rate': 2.702361226703008, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:23<16:21, 3.70s/it] 49%|████▉ | 256/520 [16:27<16:19, 3.71s/it] 
{'loss': 8.3613, 'grad_norm': 5.0508468418677915e-05, 'learning_rate': 2.686825233966061, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:27<16:19, 3.71s/it] 49%|████▉ | 257/520 [16:30<16:11, 3.69s/it] {'loss': 8.4643, 'grad_norm': 4.595269502232674e-05, 'learning_rate': 2.6712819822958918, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:30<16:11, 3.69s/it] 50%|████▉ | 258/520 [16:34<16:11, 3.71s/it] {'loss': 8.4289, 'grad_norm': 4.73518833340125e-05, 'learning_rate': 2.6557320756121303, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:34<16:11, 3.71s/it] 50%|████▉ | 259/520 [16:38<16:02, 3.69s/it] {'loss': 8.5277, 'grad_norm': 5.101274770973027e-05, 'learning_rate': 2.6401761180929793, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:38<16:02, 3.69s/it] 50%|█████ | 260/520 [16:42<16:00, 3.69s/it] {'loss': 9.2864, 'grad_norm': 6.025782649207157e-05, 'learning_rate': 2.624614714151743, 'epoch': 0.5} + 50%|█████ | 260/520 [16:42<16:00, 3.69s/it] 50%|█████ | 261/520 [16:45<15:53, 3.68s/it] {'loss': 9.0524, 'grad_norm': 5.734949569700314e-05, 'learning_rate': 2.60904846841334, 'epoch': 0.5} + 50%|█████ | 261/520 [16:45<15:53, 3.68s/it] 50%|█████ | 262/520 [16:49<15:56, 3.71s/it] {'loss': 8.3667, 'grad_norm': 4.313472451562188e-05, 'learning_rate': 2.593477985690815, 'epoch': 0.5} + 50%|█████ | 262/520 [16:49<15:56, 3.71s/it] 51%|█████ | 263/520 [16:53<15:47, 3.69s/it] {'loss': 9.1606, 'grad_norm': 6.354395830962868e-05, 'learning_rate': 2.577903870961833, 'epoch': 0.51} + 51%|█████ | 263/520 [16:53<15:47, 3.69s/it] 51%|█████ | 264/520 [16:56<15:47, 3.70s/it] {'loss': 8.4687, 'grad_norm': 4.741559589769307e-05, 'learning_rate': 2.562326729345182, 'epoch': 0.51} + 51%|█████ | 264/520 [16:56<15:47, 3.70s/it] 51%|█████ | 265/520 [17:00<15:42, 3.70s/it] {'loss': 8.229, 'grad_norm': 5.819215935855839e-05, 'learning_rate': 2.546747166077256, 'epoch': 0.51} + 51%|█████ | 265/520 [17:00<15:42, 3.70s/it] 51%|█████ | 266/520 [17:04<15:36, 3.69s/it] {'loss': 8.0518, 'grad_norm': 9.236050565739092e-05, 'learning_rate': 2.531165786488538, 'epoch': 0.51} + 51%|█████ | 266/520 [17:04<15:36, 3.69s/it] 51%|█████▏ | 267/520 [17:07<15:33, 3.69s/it] {'loss': 8.3537, 'grad_norm': 6.371456060103041e-05, 'learning_rate': 2.515583195980084, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:07<15:33, 3.69s/it] 52%|█████▏ | 268/520 [17:11<15:35, 3.71s/it] {'loss': 9.3509, 'grad_norm': 8.968049591287891e-05, 'learning_rate': 2.5, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:11<15:35, 3.71s/it] 52%|█████▏ | 269/520 [17:15<15:29, 3.70s/it] {'loss': 8.4088, 'grad_norm': 4.262068968819948e-05, 'learning_rate': 2.484416804019916, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:15<15:29, 3.70s/it] 52%|█████▏ | 270/520 [17:18<15:24, 3.70s/it] {'loss': 8.914, 'grad_norm': 4.9272791697017424e-05, 'learning_rate': 2.4688342135114625, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:18<15:24, 3.70s/it] 52%|█████▏ | 271/520 [17:22<15:19, 3.69s/it] {'loss': 8.6476, 'grad_norm': 6.066967171462152e-05, 'learning_rate': 2.453252833922745, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:22<15:19, 3.69s/it] 52%|█████▏ | 272/520 [17:26<15:16, 3.69s/it] {'loss': 9.1127, 'grad_norm': 6.254750136003921e-05, 'learning_rate': 2.4376732706548183, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:26<15:16, 3.69s/it] 52%|█████▎ | 273/520 [17:30<15:16, 3.71s/it] {'loss': 9.1683, 'grad_norm': 8.196625632019742e-05, 'learning_rate': 2.4220961290381675, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:30<15:16, 3.71s/it] 53%|█████▎ | 274/520 [17:33<15:10, 3.70s/it] {'loss': 8.3155, 'grad_norm': 6.405573469404277e-05, 
'learning_rate': 2.406522014309186, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:33<15:10, 3.70s/it] 53%|█████▎ | 275/520 [17:37<15:09, 3.71s/it] {'loss': 8.2142, 'grad_norm': 5.155542003961168e-05, 'learning_rate': 2.3909515315866603, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:37<15:09, 3.71s/it] 53%|█████▎ | 276/520 [17:41<15:06, 3.72s/it] {'loss': 8.516, 'grad_norm': 6.689491505039281e-05, 'learning_rate': 2.3753852858482567, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:41<15:06, 3.72s/it] 53%|█████▎ | 277/520 [17:44<15:04, 3.72s/it] {'loss': 9.1088, 'grad_norm': 8.13732630197894e-05, 'learning_rate': 2.3598238819070203, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:44<15:04, 3.72s/it] 53%|█████▎ | 278/520 [17:48<15:06, 3.75s/it] {'loss': 8.1364, 'grad_norm': 9.654135659871798e-05, 'learning_rate': 2.3442679243878697, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:48<15:06, 3.75s/it] 54%|█████▎ | 279/520 [17:52<14:57, 3.73s/it] {'loss': 8.8856, 'grad_norm': 5.3870328974086305e-05, 'learning_rate': 2.3287180177041082, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:52<14:57, 3.73s/it] 54%|█████▍ | 280/520 [17:56<15:00, 3.75s/it] {'loss': 8.2673, 'grad_norm': 5.104740706950498e-05, 'learning_rate': 2.3131747660339395, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:56<15:00, 3.75s/it] 54%|█████▍ | 281/520 [18:00<14:59, 3.77s/it] {'loss': 8.4992, 'grad_norm': 5.033828605020845e-05, 'learning_rate': 2.297638773296992, 'epoch': 0.54} + 54%|█████▍ | 281/520 [18:00<14:59, 3.77s/it] 54%|█████▍ | 282/520 [18:03<14:54, 3.76s/it] {'loss': 8.1533, 'grad_norm': 9.445349521620241e-05, 'learning_rate': 2.282110643130854, 'epoch': 0.54} + 54%|█████▍ | 282/520 [18:03<14:54, 3.76s/it] 54%|█████▍ | 283/520 [18:07<14:50, 3.76s/it] {'loss': 8.5655, 'grad_norm': 4.463967499036186e-05, 'learning_rate': 2.2665909788676237, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:07<14:50, 3.76s/it] 55%|█████▍ | 284/520 [18:11<14:46, 3.76s/it] {'loss': 8.9749, 'grad_norm': 6.722130739802104e-05, 'learning_rate': 2.251080383510459, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:11<14:46, 3.76s/it] 55%|█████▍ | 285/520 [18:15<14:37, 3.73s/it] {'loss': 8.3463, 'grad_norm': 7.007764828063275e-05, 'learning_rate': 2.2355794597101557, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:15<14:37, 3.73s/it] 55%|█████▌ | 286/520 [18:18<14:36, 3.75s/it] {'loss': 8.0933, 'grad_norm': 5.927132683680766e-05, 'learning_rate': 2.22008880974173, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:18<14:36, 3.75s/it] 55%|█████▌ | 287/520 [18:22<14:27, 3.72s/it] {'loss': 8.2784, 'grad_norm': 3.733434568164778e-05, 'learning_rate': 2.204609035481018, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:22<14:27, 3.72s/it] 55%|█████▌ | 288/520 [18:26<14:20, 3.71s/it] {'loss': 8.7953, 'grad_norm': 6.423996993215509e-05, 'learning_rate': 2.1891407383812878, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:26<14:20, 3.71s/it] 56%|█████▌ | 289/520 [18:29<14:18, 3.72s/it] {'loss': 8.1685, 'grad_norm': 5.153890996839382e-05, 'learning_rate': 2.1736845194498717, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:29<14:18, 3.72s/it] 56%|█████▌ | 290/520 [18:33<14:10, 3.70s/it] {'loss': 8.2236, 'grad_norm': 7.378307410774758e-05, 'learning_rate': 2.158240979224817, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:33<14:10, 3.70s/it] 56%|█████▌ | 291/520 [18:37<14:08, 3.70s/it] {'loss': 8.267, 'grad_norm': 5.3402562521064954e-05, 'learning_rate': 2.1428107177515505, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:37<14:08, 3.70s/it] 56%|█████▌ | 292/520 [18:40<14:08, 3.72s/it] {'loss': 8.5388, 'grad_norm': 3.271712137203223e-05, 'learning_rate': 
2.127394334559564, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:40<14:08, 3.72s/it] 56%|█████▋ | 293/520 [18:44<14:01, 3.71s/it] {'loss': 8.2655, 'grad_norm': 4.647489955337749e-05, 'learning_rate': 2.111992428639121, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:44<14:01, 3.71s/it] 57%|█████▋ | 294/520 [18:48<13:55, 3.70s/it] {'loss': 8.3315, 'grad_norm': 3.981727566161647e-05, 'learning_rate': 2.096605598417983, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:48<13:55, 3.70s/it] 57%|█████▋ | 295/520 [18:52<13:52, 3.70s/it] {'loss': 9.3479, 'grad_norm': 5.453705370633733e-05, 'learning_rate': 2.081234441738159, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:52<13:52, 3.70s/it] 57%|█████▋ | 296/520 [18:55<13:50, 3.71s/it] {'loss': 8.2645, 'grad_norm': 6.807273150156723e-05, 'learning_rate': 2.0658795558326744, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:55<13:50, 3.71s/it] 57%|█████▋ | 297/520 [18:59<13:45, 3.70s/it] {'loss': 8.4494, 'grad_norm': 3.415970610397407e-05, 'learning_rate': 2.0505415373023683, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:59<13:45, 3.70s/it] 57%|█████▋ | 298/520 [19:03<13:46, 3.72s/it] {'loss': 8.5778, 'grad_norm': 5.693764061288314e-05, 'learning_rate': 2.0352209820927136, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:03<13:46, 3.72s/it] 57%|█████▊ | 299/520 [19:07<13:49, 3.75s/it] {'loss': 9.0488, 'grad_norm': 6.560900560517842e-05, 'learning_rate': 2.0199184854706598, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:07<13:49, 3.75s/it] 58%|█████▊ | 300/520 [19:10<13:48, 3.76s/it] {'loss': 8.5338, 'grad_norm': 4.3162545923556325e-05, 'learning_rate': 2.0046346420015064, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:10<13:48, 3.76s/it] 58%|█████▊ | 301/520 [19:14<13:34, 3.72s/it] {'loss': 8.5444, 'grad_norm': 4.794776392818035e-05, 'learning_rate': 1.9893700455257997, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:14<13:34, 3.72s/it] 58%|█████▊ | 302/520 [19:18<13:35, 3.74s/it] {'loss': 9.1325, 'grad_norm': 5.339430669083019e-05, 'learning_rate': 1.974125289136261, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:18<13:35, 3.74s/it] 58%|█████▊ | 303/520 [19:21<13:25, 3.71s/it] {'loss': 8.2816, 'grad_norm': 3.991409627917391e-05, 'learning_rate': 1.9589009651547429, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:21<13:25, 3.71s/it] 58%|█████▊ | 304/520 [19:25<13:23, 3.72s/it] {'loss': 8.7702, 'grad_norm': 6.575813469997045e-05, 'learning_rate': 1.9436976651092142, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:25<13:23, 3.72s/it] 59%|█████▊ | 305/520 [19:29<13:18, 3.71s/it] {'loss': 8.71, 'grad_norm': 4.276556146438628e-05, 'learning_rate': 1.9285159797107765, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:29<13:18, 3.71s/it] 59%|█████▉ | 306/520 [19:33<13:19, 3.74s/it] {'loss': 8.5359, 'grad_norm': 4.107809870420596e-05, 'learning_rate': 1.9133564988307126, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:33<13:19, 3.74s/it] 59%|█████▉ | 307/520 [19:37<13:27, 3.79s/it] {'loss': 8.4567, 'grad_norm': 3.783067456060776e-05, 'learning_rate': 1.8982198114775681, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:37<13:27, 3.79s/it] 59%|█████▉ | 308/520 [19:40<13:23, 3.79s/it] {'loss': 8.6575, 'grad_norm': 4.997954479641305e-05, 'learning_rate': 1.8831065057742657, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:40<13:23, 3.79s/it] 59%|█████▉ | 309/520 [19:44<13:31, 3.85s/it] {'loss': 8.5105, 'grad_norm': 6.037072545448168e-05, 'learning_rate': 1.868017168935256, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:44<13:31, 3.85s/it] 60%|█████▉ | 310/520 [19:48<13:20, 3.81s/it] {'loss': 8.4428, 'grad_norm': 4.616776500596267e-05, 'learning_rate': 
1.852952387243698, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:48<13:20, 3.81s/it] 60%|█████▉ | 311/520 [19:52<13:07, 3.77s/it] {'loss': 8.3442, 'grad_norm': 4.371358019150084e-05, 'learning_rate': 1.8379127460286817, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:52<13:07, 3.77s/it] 60%|██████ | 312/520 [19:56<13:29, 3.89s/it] {'loss': 8.2021, 'grad_norm': 5.499974853023609e-05, 'learning_rate': 1.8228988296424875, 'epoch': 0.6} + 60%|██████ | 312/520 [19:56<13:29, 3.89s/it] 60%|██████ | 313/520 [20:00<13:13, 3.83s/it] {'loss': 8.1933, 'grad_norm': 9.18179257171424e-05, 'learning_rate': 1.8079112214378767, 'epoch': 0.6} + 60%|██████ | 313/520 [20:00<13:13, 3.83s/it] 60%|██████ | 314/520 [20:04<13:27, 3.92s/it] {'loss': 8.3648, 'grad_norm': 4.947457150641256e-05, 'learning_rate': 1.7929505037454314, 'epoch': 0.6} + 60%|██████ | 314/520 [20:04<13:27, 3.92s/it] 61%|██████ | 315/520 [20:07<13:07, 3.84s/it] {'loss': 9.1768, 'grad_norm': 6.487545791030503e-05, 'learning_rate': 1.7780172578509257, 'epoch': 0.61} + 61%|██████ | 315/520 [20:07<13:07, 3.84s/it] 61%|██████ | 316/520 [20:11<13:15, 3.90s/it] {'loss': 8.2189, 'grad_norm': 4.970086124203023e-05, 'learning_rate': 1.7631120639727393, 'epoch': 0.61} + 61%|██████ | 316/520 [20:11<13:15, 3.90s/it] 61%|██████ | 317/520 [20:15<12:59, 3.84s/it] {'loss': 8.2015, 'grad_norm': 8.208322778223064e-05, 'learning_rate': 1.7482355012393174, 'epoch': 0.61} + 61%|██████ | 317/520 [20:15<12:59, 3.84s/it] 61%|██████ | 318/520 [20:19<12:45, 3.79s/it] {'loss': 8.5188, 'grad_norm': 6.724618660699971e-05, 'learning_rate': 1.7333881476666646, 'epoch': 0.61} + 61%|██████ | 318/520 [20:19<12:45, 3.79s/it] 61%|██████▏ | 319/520 [20:23<12:52, 3.84s/it] {'loss': 8.3874, 'grad_norm': 6.917943012231763e-05, 'learning_rate': 1.7185705801358893, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:23<12:52, 3.84s/it] 62%|██████▏ | 320/520 [20:26<12:39, 3.80s/it] {'loss': 8.2108, 'grad_norm': 4.388417446036071e-05, 'learning_rate': 1.703783374370789, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:26<12:39, 3.80s/it] 62%|██████▏ | 321/520 [20:30<12:29, 3.76s/it] {'loss': 8.504, 'grad_norm': 4.48206074439057e-05, 'learning_rate': 1.6890271049154826, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:30<12:29, 3.76s/it] 62%|██████▏ | 322/520 [20:34<12:25, 3.76s/it] {'loss': 8.9525, 'grad_norm': 4.719537598250182e-05, 'learning_rate': 1.674302345112083, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:34<12:25, 3.76s/it] 62%|██████▏ | 323/520 [20:38<12:24, 3.78s/it] {'loss': 9.0885, 'grad_norm': 4.4607529915337544e-05, 'learning_rate': 1.6596096670784235, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:38<12:24, 3.78s/it] 62%|██████▏ | 324/520 [20:42<12:22, 3.79s/it] {'loss': 8.4161, 'grad_norm': 3.848917258572694e-05, 'learning_rate': 1.6449496416858285, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:42<12:22, 3.79s/it] 62%|██████▎ | 325/520 [20:45<12:19, 3.79s/it] {'loss': 8.4741, 'grad_norm': 3.183530997532007e-05, 'learning_rate': 1.6303228385369317, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:45<12:19, 3.79s/it] 63%|██████▎ | 326/520 [20:49<12:17, 3.80s/it] {'loss': 8.3382, 'grad_norm': 3.732225415657089e-05, 'learning_rate': 1.6157298259435464, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:49<12:17, 3.80s/it] 63%|██████▎ | 327/520 [20:53<12:13, 3.80s/it] {'loss': 9.07, 'grad_norm': 4.656329595874825e-05, 'learning_rate': 1.601171170904581, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:53<12:13, 3.80s/it] 63%|██████▎ | 328/520 [20:57<12:06, 3.78s/it] {'loss': 8.5646, 'grad_norm': 3.65538543241137e-05, 'learning_rate': 
1.5866474390840124, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:57<12:06, 3.78s/it] 63%|██████▎ | 329/520 [21:00<11:58, 3.76s/it] {'loss': 8.3343, 'grad_norm': 5.794182038223071e-05, 'learning_rate': 1.572159194788905, 'epoch': 0.63} + 63%|██████▎ | 329/520 [21:00<11:58, 3.76s/it] 63%|██████▎ | 330/520 [21:04<11:49, 3.73s/it] {'loss': 8.4113, 'grad_norm': 3.538257389611836e-05, 'learning_rate': 1.557707000947487, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:04<11:49, 3.73s/it] 64%|██████▎ | 331/520 [21:08<11:45, 3.73s/it] {'loss': 8.2945, 'grad_norm': 4.685173889589025e-05, 'learning_rate': 1.5432914190872757, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:08<11:45, 3.73s/it] 64%|██████▍ | 332/520 [21:12<11:46, 3.76s/it] {'loss': 9.2598, 'grad_norm': 4.833628354049794e-05, 'learning_rate': 1.5289130093132632, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:12<11:46, 3.76s/it] 64%|██████▍ | 333/520 [21:15<11:44, 3.77s/it] {'loss': 8.6386, 'grad_norm': 4.180148039792321e-05, 'learning_rate': 1.514572330286152, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:15<11:44, 3.77s/it] 64%|██████▍ | 334/520 [21:19<11:38, 3.76s/it] {'loss': 8.3528, 'grad_norm': 3.292862200377475e-05, 'learning_rate': 1.500269939200648, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:19<11:38, 3.76s/it] 64%|██████▍ | 335/520 [21:23<11:31, 3.74s/it] {'loss': 8.4961, 'grad_norm': 3.460489113022985e-05, 'learning_rate': 1.4860063917638127, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:23<11:31, 3.74s/it] 65%|██████▍ | 336/520 [21:26<11:25, 3.72s/it] {'loss': 8.243, 'grad_norm': 3.543240211376463e-05, 'learning_rate': 1.4717822421734716, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:27<11:25, 3.72s/it] 65%|██████▍ | 337/520 [21:30<11:20, 3.72s/it] {'loss': 8.0955, 'grad_norm': 4.4743637035386154e-05, 'learning_rate': 1.4575980430966806, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:30<11:20, 3.72s/it] 65%|██████▌ | 338/520 [21:34<11:14, 3.70s/it] {'loss': 8.3165, 'grad_norm': 3.552018596545632e-05, 'learning_rate': 1.4434543456482518, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:34<11:14, 3.70s/it] 65%|██████▌ | 339/520 [21:38<11:14, 3.73s/it] {'loss': 8.3915, 'grad_norm': 3.7574605699246465e-05, 'learning_rate': 1.429351699369343, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:38<11:14, 3.73s/it] 65%|██████▌ | 340/520 [21:41<11:12, 3.73s/it] {'loss': 8.28, 'grad_norm': 3.380107397911157e-05, 'learning_rate': 1.4152906522061048, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:41<11:12, 3.73s/it] 66%|██████▌ | 341/520 [21:45<11:05, 3.72s/it] {'loss': 8.3899, 'grad_norm': 3.186989026291649e-05, 'learning_rate': 1.4012717504883874, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:45<11:05, 3.72s/it] 66%|██████▌ | 342/520 [21:49<11:00, 3.71s/it] {'loss': 9.3553, 'grad_norm': 5.468810566882482e-05, 'learning_rate': 1.387295538908519, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:49<11:00, 3.71s/it] 66%|██████▌ | 343/520 [21:53<10:57, 3.72s/it] {'loss': 9.1999, 'grad_norm': 4.403747563142899e-05, 'learning_rate': 1.3733625605001365, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:53<10:57, 3.72s/it] 66%|██████▌ | 344/520 [21:56<10:50, 3.70s/it] {'loss': 8.0726, 'grad_norm': 3.7879665296472414e-05, 'learning_rate': 1.3594733566170925, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:56<10:50, 3.70s/it] 66%|██████▋ | 345/520 [22:00<10:51, 3.72s/it] {'loss': 8.3903, 'grad_norm': 2.9669413485671334e-05, 'learning_rate': 1.3456284669124159, 'epoch': 0.66} + 66%|██████▋ | 345/520 [22:00<10:51, 3.72s/it] 67%|██████▋ | 346/520 [22:04<10:53, 3.75s/it] {'loss': 9.0563, 'grad_norm': 
4.7703082097026104e-05, 'learning_rate': 1.331828429317345, 'epoch': 0.67} + 67%|██████▋ | 346/520 [22:04<10:53, 3.75s/it] 67%|██████▋ | 347/520 [22:08<10:59, 3.81s/it] {'loss': 8.3105, 'grad_norm': 6.177095158448866e-05, 'learning_rate': 1.3180737800204327, 'epoch': 0.67} + 67%|██████▋ | 347/520 [22:08<10:59, 3.81s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:12<10:59, 3.83s/it] {'loss': 8.0878, 'grad_norm': 7.234736617449972e-05, 'learning_rate': 1.3043650534467053, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:12<10:59, 3.83s/it] 67%|██████▋ | 349/520 [22:16<11:01, 3.87s/it] {'loss': 8.3358, 'grad_norm': 5.686094653853891e-05, 'learning_rate': 1.2907027822369006, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:16<11:01, 3.87s/it] 67%|██████▋ | 350/520 [22:19<10:58, 3.87s/it] {'loss': 8.4381, 'grad_norm': 2.9438520265609177e-05, 'learning_rate': 1.2770874972267776, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:19<10:58, 3.87s/it] 68%|██████▊ | 351/520 [22:23<10:55, 3.88s/it] {'loss': 8.0639, 'grad_norm': 4.700989956005152e-05, 'learning_rate': 1.2635197274264813, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:23<10:55, 3.88s/it] 68%|██████▊ | 352/520 [22:27<10:52, 3.88s/it] {'loss': 8.4773, 'grad_norm': 3.15473706764455e-05, 'learning_rate': 1.2500000000000004, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:27<10:52, 3.88s/it] 68%|██████▊ | 353/520 [22:31<10:39, 3.83s/it] {'loss': 8.8223, 'grad_norm': 3.3248898326508244e-05, 'learning_rate': 1.236528840244674, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:31<10:39, 3.83s/it] 68%|██████▊ | 354/520 [22:35<10:27, 3.78s/it] {'loss': 9.1179, 'grad_norm': 4.492342922441284e-05, 'learning_rate': 1.2231067715707866, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:35<10:27, 3.78s/it] 68%|██████▊ | 355/520 [22:38<10:20, 3.76s/it] {'loss': 8.2929, 'grad_norm': 3.8033942275238806e-05, 'learning_rate': 1.2097343154812332, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:38<10:20, 3.76s/it] 68%|██████▊ | 356/520 [22:42<10:12, 3.74s/it] {'loss': 8.2232, 'grad_norm': 4.9995592894158714e-05, 'learning_rate': 1.196411991551255, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:42<10:12, 3.74s/it] 69%|██████▊ | 357/520 [22:46<10:07, 3.72s/it] {'loss': 8.2917, 'grad_norm': 5.487354551391404e-05, 'learning_rate': 1.183140317408248, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:46<10:07, 3.72s/it] 69%|██████▉ | 358/520 [22:49<10:01, 3.71s/it] {'loss': 8.2491, 'grad_norm': 3.8333855243114596e-05, 'learning_rate': 1.169919808711659, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:49<10:01, 3.71s/it] 69%|██████▉ | 359/520 [22:53<09:57, 3.71s/it] {'loss': 8.9235, 'grad_norm': 4.042900574396569e-05, 'learning_rate': 1.15675097913294, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:53<09:57, 3.71s/it] 69%|██████▉ | 360/520 [22:57<09:55, 3.72s/it] {'loss': 9.0168, 'grad_norm': 5.125049334848904e-05, 'learning_rate': 1.1436343403356017, 'epoch': 0.69} + 69%|██████▉ | 360/520 [22:57<09:55, 3.72s/it] 69%|██████▉ | 361/520 [23:01<09:52, 3.73s/it] {'loss': 9.1944, 'grad_norm': 4.564041045855446e-05, 'learning_rate': 1.130570401955322, 'epoch': 0.69} + 69%|██████▉ | 361/520 [23:01<09:52, 3.73s/it] 70%|██████▉ | 362/520 [23:04<09:47, 3.72s/it] {'loss': 8.1641, 'grad_norm': 3.664151700424224e-05, 'learning_rate': 1.1175596715801515, 'epoch': 0.7} + 70%|██████▉ | 362/520 [23:04<09:47, 3.72s/it] 70%|██████▉ | 363/520 [23:08<09:44, 3.72s/it] {'loss': 8.5112, 
'grad_norm': 2.773958812008982e-05, 'learning_rate': 1.1046026547307906, 'epoch': 0.7}
[per-step progress output for steps 364-429 elided: each step logs a duplicated tqdm bar plus a loss dict; loss fluctuates between ~7.8 and ~9.9, grad_norm stays within ~2.6e-05 to 8.1e-05, and the learning rate decays from ~1.09 toward 0 on the cosine schedule at ~3.6-4.0 s/it]
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
[per-step progress output for steps 430-519 elided; same ranges as above]
+{'loss': 9.5135, 'grad_norm': 4.974859197042872e-05, 'learning_rate': 0.0, 'epoch': 1.0}
+100%|██████████| 520/520 [33:00<00:00, 3.86s/it]
+{'train_runtime': 1980.9492, 'train_samples_per_second': 33.584, 'train_steps_per_second': 0.263, 'train_loss': 8.698346126538057, 'epoch': 1.0}
+[2025-10-09 06:23:34,623] [INFO] [launch.py:348:main] Process 829580 exits successfully.
[seven further exit lines for processes 829577-829584, timestamped 2025-10-09 06:23:35 to 06:23:39, elided]
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5_connector-3.0_0.5_5_ablation_20251009_054857.log
+Timestamp: 2025-10-09 06:23:42
+=====================================
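[editor's note: the 520 steps reported above are consistent with the launch flags; a quick sanity check in Python, with all names illustrative rather than taken from the training code:]

import math

world_size, per_device_batch, grad_accum = 8, 4, 4            # from the launch flags
effective_batch = world_size * per_device_batch * grad_accum  # 128 samples per optimizer step

# The trainer's own summary implies the epoch's sample count:
# 1980.9492 s * 33.584 samples/s ~ 66,528 samples, i.e. roughly
# --train_data_ratio 0.1 of the ~665K llava_v1_5_mix665k examples.
samples = 1980.9492 * 33.584

print(math.ceil(samples / effective_batch))  # 520, matching the "520/520" bar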
diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251009_085249.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251009_085249.log
new file mode 100644
index 0000000000000000000000000000000000000000..a4ea28eb35513dc12b36a718f04dbe43484b2e0a
--- /dev/null
+++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251009_085249.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251009_085249.log
+Timestamp: 2025-10-09 08:52:49
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[2025-10-09 08:52:51,758] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 08:52:54,420] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-09 08:52:54,421] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 5e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 5e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
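[editor's note: the --world_info blob above is just base64-encoded JSON; decoding it reproduces the WORLD INFO DICT printed a few lines below. A minimal check:]

import base64, json
blob = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(blob)))  # {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}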
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
+[2025-10-09 08:52:57,060] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 08:52:58,087] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-09 08:52:58,087] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-09 08:52:58,087] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-09 08:52:58,087] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-09 08:52:58,087] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-09 08:52:58,087] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-09 08:52:58,087] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-09 08:52:58,090] [INFO] [launch.py:253:main] process 1025821 spawned with the full train.py command from the runner line above, plus '--local_rank=0'
[seven near-identical spawn dumps elided: processes 1025822-1025828, timestamped 08:52:58,092 to 08:52:58,104, each repeating the same command with --local_rank=1 through 7]
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+  import pynvml  # type: ignore[import]
[the pynvml FutureWarning and its import echo repeat once per spawned rank; seven further copies elided]
+[2025-10-09 08:53:03,939] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[seven further "Setting ds_accelerator to cuda" lines, timestamped 08:53:04,663 to 08:53:05,017, elided]
+[2025-10-09 08:53:04,372] [INFO] [comm.py:637:init_distributed] cdb=None
[seven further "cdb=None" lines, timestamped 08:53:05,067 to 08:53:05,421, elided]
+[2025-10-09 08:53:05,394] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1025821:1025821 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025821:1025821 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025821:1025821 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1025821:1025821 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1025821:1025821 [0] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1025821:1025821 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. 
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1025826:1025826 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1025826:1025826 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025826:1025826 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025825:1025825 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1025826:1025826 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1025826:1025826 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1025826:1025826 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1025825:1025825 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025825:1025825 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025825:1025825 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1025825:1025825 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1025825:1025825 [4] NCCL INFO NET/Plugin: Using internal network plugin. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1025827:1025827 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1025827:1025827 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025827:1025827 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025827:1025827 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1025827:1025827 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1025827:1025827 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1025828:1025828 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1025828:1025828 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025828:1025828 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025828:1025828 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1025828:1025828 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1025828:1025828 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
+ywang29-vrdb-test1-worker-0:1025822:1025822 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1025822:1025822 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025822:1025822 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025822:1025822 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1025822:1025822 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1025822:1025822 [1] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1025824:1025824 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1025824:1025824 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025824:1025824 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025824:1025824 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1025824:1025824 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1025824:1025824 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1025823:1025823 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1025823:1025823 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025823:1025823 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025823:1025823 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1025823:1025823 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1025823:1025823 [2] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO ncclCommInitRank comm 0x5576352ecae0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x4face0856aed9243 - Init START +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO ncclCommInitRank comm 0x55add6d771b0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x4face0856aed9243 - Init START +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO ncclCommInitRank comm 0x564ba7375400 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x4face0856aed9243 - Init START +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO ncclCommInitRank comm 0x56410c7e15c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x4face0856aed9243 - Init START +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO ncclCommInitRank comm 0x5567cefdade0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x4face0856aed9243 - Init START +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO ncclCommInitRank comm 0x564fd9db66c0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x4face0856aed9243 - Init START +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO ncclCommInitRank comm 0x560ad54c9820 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x4face0856aed9243 - Init START +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO ncclCommInitRank comm 0x56025fd01900 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x4face0856aed9243 - Init START +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff 
+ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO comm 0x56025fd01900 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO comm 0x5567cefdade0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO comm 0x5576352ecae0 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO comm 0x564ba7375400 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO comm 0x55add6d771b0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO comm 0x56410c7e15c0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO comm 0x560ad54c9820 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO comm 0x564fd9db66c0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 
6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO P2P Chunksize set to 524288 
+ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL 
INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL 
INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL 
INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+[condensed: ~230 further NCCL INFO lines in which ranks 0-7 of ywang29-vrdb-test1-worker-0 finish opening Channel 00/0-23/0 peer links (4->5, 2->1, 7->6, 3->2, 1->0, 6->5, 5->4, 4->3) via P2P/CUMEM/read and each rank reports "Connected all rings" and "Connected all trees"]
+[condensed: every rank reports "threadThresholds 8/8/64 | 64/8/64 | 512 | 512" and "24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer"; ranks 5, 1, 4, 6 and 0 then log "TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so" followed by "TUNER/Plugin: Using internal tuner plugin."]
+ywang29-vrdb-test1-worker-0:1025826:1027438 [5] NCCL INFO ncclCommInitRank comm 0x5567cefdade0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x4face0856aed9243 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1025822:1027442 [1] NCCL INFO ncclCommInitRank comm 0x564ba7375400 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x4face0856aed9243 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1025825:1027439 [4] NCCL INFO ncclCommInitRank comm 0x564fd9db66c0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x4face0856aed9243 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1025821:1027437 [0] NCCL INFO ncclCommInitRank comm 0x560ad54c9820 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x4face0856aed9243 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025827:1027440 [6] NCCL INFO ncclCommInitRank comm 0x56025fd01900 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x4face0856aed9243 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1025823:1027444 [2] NCCL INFO ncclCommInitRank comm 0x5576352ecae0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x4face0856aed9243 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1025824:1027443 [3] NCCL INFO ncclCommInitRank comm 0x55add6d771b0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x4face0856aed9243 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1025828:1027441 [7] NCCL INFO ncclCommInitRank comm 0x56410c7e15c0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x4face0856aed9243 - Init COMPLETE +[2025-10-09 08:53:50,560] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 
'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 
'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
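Every entry in the list above follows one pattern: each attention projection (q/k/v/o_proj) and each MLP projection (gate/up/down_proj) in layers 0-23 gains a ".scores" tensor that the pretrained checkpoint does not contain, i.e. 24 layers x 7 projections = 168 newly initialized tensors. Parameters registered under the name "scores" on linear layers are the usual signature of learnable weight-masking (subnetwork) methods: each weight gets a trainable score, the scores pass through a temperature-scaled sigmoid to form a soft mask, and the forward pass uses the masked weights. The sketch below is a minimal illustration of that idea only; the class name MaskedLinear and the init_mean/temperature arguments are illustrative assumptions, not taken from this repository's code.

import torch
import torch.nn as nn
import torch.nn.functional as F

# Hypothetical sketch, not this repository's code: a linear layer that carries
# a per-weight "scores" tensor. Registering it under the attribute name
# "scores" is what yields state_dict keys like
# "model.layers.0.self_attn.q_proj.scores"; loading a checkpoint saved without
# them triggers the "newly initialized" warning above.
class MaskedLinear(nn.Linear):
    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):  # illustrative values
        super().__init__(in_features, out_features, bias=bias)
        # One learnable score per weight; a constant positive init keeps the
        # sigmoid mask near 1, so training starts close to the dense network.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        self.temperature = temperature

    def forward(self, x):
        # Soft mask in (0, 1); a lower temperature sharpens it toward 0/1.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

layer = MaskedLinear(16, 8)
print([name for name, _ in layer.named_parameters()])
# ['weight', 'bias', 'scores'] -> the extra key the loader reports above

The "You should probably TRAIN this model" notice that accompanies the warning is Transformers' standard reminder that such newly initialized parameters have not been trained yet.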
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 
'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 
'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 
'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 
'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-09 08:53:52,282] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
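[Editor's note] The SupermaskLinearSparsity_SoftForward_Normal modules shown above stand in for plain nn.Linear layers: alongside the pretrained weight they carry a trainable scores tensor of the same shape, and a soft mask derived from those scores gates the weights in the forward pass. Below is a minimal Python sketch of that idea; the class name SoftMaskedLinear, the sigmoid masking, and the temperature default are illustrative assumptions, not the repo's actual implementation. The extra scores tensors also explain the "newly initialized" warning earlier: the pretrained checkpoint has no `.scores` keys.

import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    # Sketch of a soft-masked ("supermask") linear layer, assumed semantics:
    # the pretrained weight stays frozen and only the per-weight scores are
    # trained; a sigmoid of the scores acts as a soft, differentiable mask.
    def __init__(self, in_features, out_features, bias=True, temperature=1.0):
        super().__init__(in_features, out_features, bias=bias)
        self.weight.requires_grad_(False)   # base weights are not updated
        self.scores = nn.Parameter(torch.ones_like(self.weight))
        self.temperature = temperature      # sharpness of the soft mask

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # values in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)

Since scores mirrors the weight shape, each masked projection adds exactly one trainable scalar per weight entry, which is what the per-tensor counts further down reflect.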
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
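[Editor's note] The run trains on a fixed 10% subset: floor(665298 * 0.1) = 66529 examples. A hypothetical sketch of seeded subsampling along those lines; the actual selection code and seed handling live in the training repo.

import random

total_samples, ratio = 665298, 0.1
rng = random.Random(0)  # hypothetical seed; the run fixes its own
subset = rng.sample(range(total_samples), int(total_samples * ratio))
assert len(subset) == 66529  # matches the logged count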
+2025-10-09 08:54:05,358 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-09 08:54:05,362 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
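[Editor's note] Each count above is the product of a projection's dimensions (896 * 896 = 802816, 128 * 896 = 114688, 4864 * 896 = 4358144, 1152 * 896 = 1032192), and together they sum exactly to the reported 359661568 trainable parameters. A quick consistency check; the global batch size of 128 used for the step count is an assumption, not read from this log:

import math

per_layer = 2 * 802816 + 2 * 114688 + 3 * 4358144   # q/o, k/v, gate/up/down
total_trainable = 24 * per_layer + 1032192 + 802816  # 24 layers + connector
assert total_trainable == 359661568                  # matches the log

# The "0/520" progress bar below: ceil(66529 / 128) = 520 optimizer steps,
# assuming an effective global batch of 128 (e.g. 8 ranks x per-device
# batch 4 x gradient accumulation 4 - an assumption for illustration).
assert math.ceil(66529 / 128) == 520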
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4
[21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL 
INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL 
INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL 
INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read 
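[editorial note] The "Trees", "Channel", and "Connected all rings" records above describe the NCCL topology this worker built for its 8 GPUs: 24 collective channels, ring order 0 -> 1 -> ... -> 7, chain-shaped trees, and P2P/CUMEM transport between peers. A minimal sketch (not part of this run; assumes a single 8-GPU host launched with torchrun) of the kind of collective these channels carry:

    # check_nccl.py (hypothetical filename) -- run with: torchrun --nproc_per_node=8 check_nccl.py
    import torch
    import torch.distributed as dist

    dist.init_process_group(backend="nccl")      # the same backend the log shows initializing
    rank = dist.get_rank()
    torch.cuda.set_device(rank % torch.cuda.device_count())  # single-node assumption

    x = torch.full((1024,), float(rank), device="cuda")
    dist.all_reduce(x, op=dist.ReduceOp.SUM)     # travels over the rings/trees set up above
    if rank == 0:
        print(x[0].item())                       # 28.0 for 8 ranks (0 + 1 + ... + 7)
    dist.destroy_process_group()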
+ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 12/0 : 2[2] -> 
1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL 
INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
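[editorial note] A stage3.py [WARNING] later in this log reports PyTorch allocator cache flushes under memory pressure and suggests adding synchronized get_accelerator().empty_cache() calls to the training loop. A hedged sketch of that suggestion (not this run's code; model_engine, train_loader, and flush_every are illustrative names):

    from deepspeed.accelerator import get_accelerator

    def train(model_engine, train_loader, flush_every=100):
        # model_engine: an engine returned by deepspeed.initialize(...);
        # flush_every is an illustrative interval, not taken from this run.
        for step, batch in enumerate(train_loader):
            loss = model_engine(batch)
            model_engine.backward(loss)
            model_engine.step()
            if step % flush_every == 0:
                # Every rank reaches this on the same steps, so caches are
                # flushed at the same time, as the warning recommends.
                get_accelerator().empty_cache()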
+ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO 
threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1025825:1032342 [4] NCCL INFO ncclCommInitRank comm 0x7f7fd006aca0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xafac9e79aab45be2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025823:1032345 [2] NCCL INFO ncclCommInitRank comm 0x7f0c7006ab10 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xafac9e79aab45be2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025821:1032339 [0] NCCL INFO ncclCommInitRank comm 0x7f562006a9d0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xafac9e79aab45be2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025827:1032341 [6] NCCL INFO ncclCommInitRank comm 0x7f3e6806b050 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xafac9e79aab45be2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025828:1032346 [7] NCCL INFO ncclCommInitRank comm 0x7fe37c06a260 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xafac9e79aab45be2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025822:1032344 [1] NCCL INFO ncclCommInitRank comm 0x7fccb806a7a0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xafac9e79aab45be2 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1025824:1032343 [3] NCCL INFO ncclCommInitRank comm 0x7fa7cc06b330 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xafac9e79aab45be2 - Init COMPLETE 
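[editorial note] The per-step 'learning_rate' values in the trainer records that follow are consistent with linear warmup over ~16 steps (warmup_ratio 0.03 of 520 total steps) into cosine decay, i.e. the shape of transformers' get_cosine_schedule_with_warmup. A small sketch reproducing the logged numbers (the peak value 0.5 is read off the log itself, not the command line):

    import math

    TOTAL_STEPS, WARMUP_STEPS, PEAK_LR = 520, 16, 0.5  # inferred from the logged values

    def lr_at(step: int) -> float:
        # Linear warmup, then cosine decay to zero (num_cycles = 0.5).
        if step <= WARMUP_STEPS:
            return PEAK_LR * step / WARMUP_STEPS
        progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
        return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(lr_at(1))   # 0.03125, as logged at step 1
    print(lr_at(17))  # ~0.4999951432328845, as logged at step 17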
+ywang29-vrdb-test1-worker-0:1025826:1032340 [5] NCCL INFO ncclCommInitRank comm 0x7fc63406a8c0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xafac9e79aab45be2 - Init COMPLETE + 0%| | 1/520 [16:41<144:26:31, 1001.91s/it] {'loss': 2.0453, 'grad_norm': 0.0048351853914150675, 'learning_rate': 0.03125, 'epoch': 0.0} + 0%| | 1/520 [16:41<144:26:31, 1001.91s/it] 0%| | 2/520 [16:45<59:41:10, 414.81s/it] {'loss': 2.0549, 'grad_norm': 0.005249331466278906, 'learning_rate': 0.0625, 'epoch': 0.0} + 0%| | 2/520 [16:45<59:41:10, 414.81s/it] 1%| | 3/520 [16:49<32:37:12, 227.14s/it] {'loss': 2.1899, 'grad_norm': 0.0060051832265193995, 'learning_rate': 0.09375, 'epoch': 0.01} + 1%| | 3/520 [16:49<32:37:12, 227.14s/it] 1%| | 4/520 [16:53<19:55:09, 138.97s/it] {'loss': 1.7205, 'grad_norm': 0.0017628862805950216, 'learning_rate': 0.125, 'epoch': 0.01} + 1%| | 4/520 [16:53<19:55:09, 138.97s/it] 1%| | 5/520 [16:57<12:54:26, 90.23s/it] {'loss': 1.7413, 'grad_norm': 0.0015162678403429304, 'learning_rate': 0.15625, 'epoch': 0.01} + 1%| | 5/520 [16:57<12:54:26, 90.23s/it] 1%| | 6/520 [17:01<8:41:18, 60.85s/it] {'loss': 1.4086, 'grad_norm': 0.0005335514511434737, 'learning_rate': 0.1875, 'epoch': 0.01} + 1%| | 6/520 [17:01<8:41:18, 60.85s/it] 1%|▏ | 7/520 [17:04<6:00:50, 42.20s/it] {'loss': 1.4713, 'grad_norm': 0.0005252871101453024, 'learning_rate': 0.21875, 'epoch': 0.01} + 1%|▏ | 7/520 [17:04<6:00:50, 42.20s/it] 2%|▏ | 8/520 [17:09<4:17:32, 30.18s/it] {'loss': 1.4855, 'grad_norm': 0.0006248540092088121, 'learning_rate': 0.25, 'epoch': 0.02} + 2%|▏ | 8/520 [17:09<4:17:32, 30.18s/it] 2%|▏ | 9/520 [17:13<3:08:00, 22.08s/it] {'loss': 1.5486, 'grad_norm': 0.0006248470730554379, 'learning_rate': 0.28125, 'epoch': 0.02} + 2%|▏ | 9/520 [17:13<3:08:00, 22.08s/it] 2%|▏ | 10/520 [17:17<2:19:46, 16.44s/it] {'loss': 1.3746, 'grad_norm': 0.0007613700867028356, 'learning_rate': 0.3125, 'epoch': 0.02} + 2%|▏ | 10/520 [17:17<2:19:46, 16.44s/it] 2%|▏ | 11/520 [17:21<1:46:52, 12.60s/it] {'loss': 1.4303, 'grad_norm': 0.000823264686043381, 'learning_rate': 0.34375, 'epoch': 0.02} + 2%|▏ | 11/520 [17:21<1:46:52, 12.60s/it] 2%|▏ | 12/520 [17:25<1:24:21, 9.96s/it] {'loss': 1.3262, 'grad_norm': 0.0012714584975008752, 'learning_rate': 0.375, 'epoch': 0.02} + 2%|▏ | 12/520 [17:25<1:24:21, 9.96s/it][2025-10-09 09:11:39,644] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. 
If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [17:29<1:10:10, 8.30s/it] {'loss': 1.3747, 'grad_norm': 0.0015047785011182264, 'learning_rate': 0.40625, 'epoch': 0.03} + 2%|▎ | 13/520 [17:29<1:10:10, 8.30s/it] 3%|▎ | 14/520 [17:33<58:35, 6.95s/it] {'loss': 1.4304, 'grad_norm': 0.0016229376323543048, 'learning_rate': 0.4375, 'epoch': 0.03} + 3%|▎ | 14/520 [17:33<58:35, 6.95s/it] 3%|▎ | 15/520 [17:37<50:39, 6.02s/it] {'loss': 1.3929, 'grad_norm': 0.0015753707295503615, 'learning_rate': 0.46875, 'epoch': 0.03} + 3%|▎ | 15/520 [17:37<50:39, 6.02s/it] 3%|▎ | 16/520 [17:41<44:58, 5.35s/it] {'loss': 1.381, 'grad_norm': 0.002034251900295014, 'learning_rate': 0.5, 'epoch': 0.03} + 3%|▎ | 16/520 [17:41<44:58, 5.35s/it] 3%|▎ | 17/520 [17:44<41:00, 4.89s/it] {'loss': 1.511, 'grad_norm': 0.002473367574229875, 'learning_rate': 0.4999951432328845, 'epoch': 0.03} + 3%|▎ | 17/520 [17:44<41:00, 4.89s/it] 3%|▎ | 18/520 [17:48<38:11, 4.56s/it] {'loss': 1.3979, 'grad_norm': 0.003182843416220789, 'learning_rate': 0.4999805731202437, 'epoch': 0.03} + 3%|▎ | 18/520 [17:48<38:11, 4.56s/it] 4%|▎ | 19/520 [17:52<35:51, 4.29s/it] {'loss': 1.4452, 'grad_norm': 0.0031901496924942024, 'learning_rate': 0.4999562902281866, 'epoch': 0.04} + 4%|▎ | 19/520 [17:52<35:51, 4.29s/it] 4%|▍ | 20/520 [17:56<34:16, 4.11s/it] {'loss': 1.4582, 'grad_norm': 0.00469910004557848, 'learning_rate': 0.49992229550020406, 'epoch': 0.04} + 4%|▍ | 20/520 [17:56<34:16, 4.11s/it] 4%|▍ | 21/520 [17:59<33:08, 3.98s/it] {'loss': 1.7046, 'grad_norm': 0.013300387463874892, 'learning_rate': 0.4998785902571319, 'epoch': 0.04} + 4%|▍ | 21/520 [17:59<33:08, 3.98s/it] 4%|▍ | 22/520 [18:03<32:12, 3.88s/it] {'loss': 1.9027, 'grad_norm': 0.01241011424604741, 'learning_rate': 0.49982517619709965, 'epoch': 0.04} + 4%|▍ | 22/520 [18:03<32:12, 3.88s/it] 4%|▍ | 23/520 [18:07<31:35, 3.81s/it] {'loss': 1.7362, 'grad_norm': 0.007771176758176205, 'learning_rate': 0.4997620553954645, 'epoch': 0.04} + 4%|▍ | 23/520 [18:07<31:35, 3.81s/it] 5%|▍ | 24/520 [18:10<31:02, 3.76s/it] {'loss': 1.6019, 'grad_norm': 0.004336903727083631, 'learning_rate': 0.49968923030473056, 'epoch': 0.05} + 5%|▍ | 24/520 [18:10<31:02, 3.76s/it] 5%|▍ | 25/520 [18:14<30:41, 3.72s/it] {'loss': 1.7214, 'grad_norm': 0.005435815949553256, 'learning_rate': 0.49960670375445415, 'epoch': 0.05} + 5%|▍ | 25/520 [18:14<30:41, 3.72s/it] 5%|▌ | 26/520 [18:18<30:31, 3.71s/it] {'loss': 1.856, 'grad_norm': 0.02868367876519436, 'learning_rate': 0.499514478951133, 'epoch': 0.05} + 5%|▌ | 26/520 [18:18<30:31, 3.71s/it] 5%|▌ | 27/520 [18:21<30:14, 3.68s/it] {'loss': 1.6446, 'grad_norm': 0.017016539907549665, 'learning_rate': 0.4994125594780822, 'epoch': 0.05} + 5%|▌ | 27/520 [18:21<30:14, 3.68s/it] 5%|▌ | 28/520 [18:25<30:29, 3.72s/it] {'loss': 1.7044, 'grad_norm': 0.008001920634491016, 'learning_rate': 0.499300949295295, 'epoch': 0.05} + 5%|▌ | 28/520 [18:25<30:29, 3.72s/it] 6%|▌ | 29/520 [18:29<30:52, 3.77s/it] {'loss': 1.9205, 'grad_norm': 0.020225638391506526, 'learning_rate': 0.49917965273928877, 'epoch': 0.06} + 6%|▌ | 29/520 [18:29<30:52, 3.77s/it] 6%|▌ | 30/520 [18:33<31:03, 3.80s/it] {'loss': 2.1469, 'grad_norm': 0.020352492158788246, 'learning_rate': 0.4990486745229364, 'epoch': 0.06} + 6%|▌ | 30/520 [18:33<31:03, 3.80s/it] 6%|▌ | 31/520 [18:37<31:11, 3.83s/it] {'loss': 1.8742, 'grad_norm': 0.023787749758943856, 'learning_rate': 0.4989080197352834, 
'epoch': 0.06} + 6%|▌ | 31/520 [18:37<31:11, 3.83s/it] 6%|▌ | 32/520 [18:40<30:45, 3.78s/it] {'loss': 2.058, 'grad_norm': 0.009498580329671706, 'learning_rate': 0.4987576938413504, 'epoch': 0.06} + 6%|▌ | 32/520 [18:40<30:45, 3.78s/it] 6%|▋ | 33/520 [18:44<30:22, 3.74s/it] {'loss': 2.0021, 'grad_norm': 0.031905923235227576, 'learning_rate': 0.49859770268191994, 'epoch': 0.06} + 6%|▋ | 33/520 [18:44<30:22, 3.74s/it] 7%|▋ | 34/520 [18:48<30:13, 3.73s/it] {'loss': 2.1153, 'grad_norm': 0.03694761853331145, 'learning_rate': 0.4984280524733107, 'epoch': 0.07} + 7%|▋ | 34/520 [18:48<30:13, 3.73s/it] 7%|▋ | 35/520 [18:51<29:59, 3.71s/it] {'loss': 2.196, 'grad_norm': 0.020332472504051624, 'learning_rate': 0.49824874980713485, 'epoch': 0.07} + 7%|▋ | 35/520 [18:51<29:59, 3.71s/it] 7%|▋ | 36/520 [18:55<30:06, 3.73s/it] {'loss': 1.9596, 'grad_norm': 0.010080688060328972, 'learning_rate': 0.49805980165004304, 'epoch': 0.07} + 7%|▋ | 36/520 [18:55<30:06, 3.73s/it] 7%|▋ | 37/520 [18:59<30:10, 3.75s/it] {'loss': 2.0498, 'grad_norm': 0.008942950692359173, 'learning_rate': 0.4978612153434526, 'epoch': 0.07} + 7%|▋ | 37/520 [18:59<30:10, 3.75s/it] 7%|▋ | 38/520 [19:03<29:56, 3.73s/it] {'loss': 1.9374, 'grad_norm': 0.004303756212304162, 'learning_rate': 0.4976529986032632, 'epoch': 0.07} + 7%|▋ | 38/520 [19:03<29:56, 3.73s/it] 8%|▊ | 39/520 [19:06<29:39, 3.70s/it] {'loss': 1.7029, 'grad_norm': 0.0037155265566464067, 'learning_rate': 0.4974351595195565, 'epoch': 0.07} + 8%|▊ | 39/520 [19:06<29:39, 3.70s/it] 8%|▊ | 40/520 [19:10<29:26, 3.68s/it] {'loss': 1.7344, 'grad_norm': 0.0038006939972804184, 'learning_rate': 0.4972077065562821, 'epoch': 0.08} + 8%|▊ | 40/520 [19:10<29:26, 3.68s/it] 8%|▊ | 41/520 [19:13<29:19, 3.67s/it] {'loss': 1.7015, 'grad_norm': 0.004091043647505732, 'learning_rate': 0.49697064855092865, 'epoch': 0.08} + 8%|▊ | 41/520 [19:13<29:19, 3.67s/it] 8%|▊ | 42/520 [19:17<29:15, 3.67s/it] {'loss': 1.7094, 'grad_norm': 0.003214443349971387, 'learning_rate': 0.4967239947141803, 'epoch': 0.08} + 8%|▊ | 42/520 [19:17<29:15, 3.67s/it] 8%|▊ | 43/520 [19:21<29:14, 3.68s/it] {'loss': 1.9039, 'grad_norm': 0.00728763180323383, 'learning_rate': 0.49646775462955894, 'epoch': 0.08} + 8%|▊ | 43/520 [19:21<29:14, 3.68s/it] 8%|▊ | 44/520 [19:25<29:10, 3.68s/it] {'loss': 1.9035, 'grad_norm': 0.0035650535937851845, 'learning_rate': 0.49620193825305203, 'epoch': 0.08} + 8%|▊ | 44/520 [19:25<29:10, 3.68s/it] 9%|▊ | 45/520 [19:28<29:06, 3.68s/it] {'loss': 1.6641, 'grad_norm': 0.0024243517518790616, 'learning_rate': 0.49592655591272533, 'epoch': 0.09} + 9%|▊ | 45/520 [19:28<29:06, 3.68s/it] 9%|▉ | 46/520 [19:32<28:59, 3.67s/it] {'loss': 1.9437, 'grad_norm': 0.0026335514944591364, 'learning_rate': 0.4956416183083221, 'epoch': 0.09} + 9%|▉ | 46/520 [19:32<28:59, 3.67s/it] 9%|▉ | 47/520 [19:35<28:53, 3.66s/it] {'loss': 1.6366, 'grad_norm': 0.002336785389917861, 'learning_rate': 0.49534713651084694, 'epoch': 0.09} + 9%|▉ | 47/520 [19:35<28:53, 3.66s/it] 9%|▉ | 48/520 [19:39<28:46, 3.66s/it] {'loss': 1.5976, 'grad_norm': 0.002418674601878588, 'learning_rate': 0.49504312196213596, 'epoch': 0.09} + 9%|▉ | 48/520 [19:39<28:46, 3.66s/it] 9%|▉ | 49/520 [19:43<28:39, 3.65s/it] {'loss': 1.6052, 'grad_norm': 0.0018902458483071264, 'learning_rate': 0.4947295864744121, 'epoch': 0.09} + 9%|▉ | 49/520 [19:43<28:39, 3.65s/it] 10%|▉ | 50/520 [19:46<28:45, 3.67s/it] {'loss': 1.5878, 'grad_norm': 0.0019483195141152643, 'learning_rate': 0.49440654222982616, 'epoch': 0.1} + 10%|▉ | 50/520 [19:46<28:45, 3.67s/it] 10%|▉ | 51/520 
[19:50<28:43, 3.67s/it] {'loss': 1.4964, 'grad_norm': 0.002268381427494804, 'learning_rate': 0.49407400177998334, 'epoch': 0.1}
[Steps 51-429 of 520, one metric record per step in the form {'loss': ..., 'grad_norm': ..., 'learning_rate': ..., 'epoch': ...} printed beside its tqdm progress bar: loss drifts down from 1.4964 (step 51) to 1.2957 (step 429) with per-step noise; grad_norm decays from 2.27e-3 to 5.93e-4; the learning rate falls on a cosine schedule from 0.49407 through 0.25000 (step 268) to 0.03915; the epoch counter advances from 0.10 to 0.82; throughput holds at 3.63-3.90 s/it. After step 347 the tokenizer warns once: "Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors".]
82%|████████▎ | 429/520 [43:17<05:39, 3.73s/it] {'loss': 1.2957, 'grad_norm': 0.0005934629152868618, 'learning_rate': 0.039152138546778625, 'epoch': 0.82}
Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048).
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [43:21<05:40, 3.78s/it] {'loss': 1.2851, 'grad_norm': 0.0005350408446440115, 'learning_rate': 0.03831895019292897, 'epoch': 0.83} + 83%|████████▎ | 430/520 [43:21<05:40, 3.78s/it] 83%|████████▎ | 431/520 [43:25<05:39, 3.81s/it] {'loss': 1.3757, 'grad_norm': 0.0005785270149129147, 'learning_rate': 0.03749398652357272, 'epoch': 0.83} + 83%|████████▎ | 431/520 [43:25<05:39, 3.81s/it] 83%|████████▎ | 432/520 [43:29<05:34, 3.80s/it] {'loss': 1.194, 'grad_norm': 0.0005727334877827648, 'learning_rate': 0.0366772795919611, 'epoch': 0.83} + 83%|████████▎ | 432/520 [43:29<05:34, 3.80s/it] 83%|████████▎ | 433/520 [43:32<05:27, 3.77s/it] {'loss': 1.3404, 'grad_norm': 0.0005950569943297289, 'learning_rate': 0.035868861130537166, 'epoch': 0.83} + 83%|████████▎ | 433/520 [43:32<05:27, 3.77s/it] 83%|████████▎ | 434/520 [43:36<05:21, 3.74s/it] {'loss': 1.0686, 'grad_norm': 0.0006468215858479596, 'learning_rate': 0.035068762549702426, 'epoch': 0.83} + 83%|████████▎ | 434/520 [43:36<05:21, 3.74s/it] 84%|████████▎ | 435/520 [43:40<05:15, 3.71s/it] {'loss': 1.386, 'grad_norm': 0.0006524050506796475, 'learning_rate': 0.03427701493659674, 'epoch': 0.84} + 84%|████████▎ | 435/520 [43:40<05:15, 3.71s/it] 84%|████████▍ | 436/520 [43:43<05:10, 3.70s/it] {'loss': 1.1666, 'grad_norm': 0.0005713093490238039, 'learning_rate': 0.03349364905389032, 'epoch': 0.84} + 84%|████████▍ | 436/520 [43:43<05:10, 3.70s/it] 84%|████████▍ | 437/520 [43:47<05:07, 3.70s/it] {'loss': 1.398, 'grad_norm': 0.0005588053366245765, 'learning_rate': 0.0327186953385884, 'epoch': 0.84} + 84%|████████▍ | 437/520 [43:47<05:07, 3.70s/it] 84%|████████▍ | 438/520 [43:51<05:02, 3.69s/it] {'loss': 1.1942, 'grad_norm': 0.0005575559255364334, 'learning_rate': 0.03195218390084867, 'epoch': 0.84} + 84%|████████▍ | 438/520 [43:51<05:02, 3.69s/it] 84%|████████▍ | 439/520 [43:54<04:58, 3.69s/it] {'loss': 1.3349, 'grad_norm': 0.0005089932861137818, 'learning_rate': 0.03119414452281158, 'epoch': 0.84} + 84%|████████▍ | 439/520 [43:54<04:58, 3.69s/it] 85%|████████▍ | 440/520 [43:58<04:54, 3.68s/it] {'loss': 1.2575, 'grad_norm': 0.0006644738928042205, 'learning_rate': 0.030444606657442835, 'epoch': 0.85} + 85%|████████▍ | 440/520 [43:58<04:54, 3.68s/it] 85%|████████▍ | 441/520 [44:02<04:52, 3.70s/it] {'loss': 1.3732, 'grad_norm': 0.0005460451556326197, 'learning_rate': 0.0297035994273894, 'epoch': 0.85} + 85%|████████▍ | 441/520 [44:02<04:52, 3.70s/it] 85%|████████▌ | 442/520 [44:06<04:50, 3.72s/it] {'loss': 1.3147, 'grad_norm': 0.0007252486081776607, 'learning_rate': 0.028971151623847585, 'epoch': 0.85} + 85%|████████▌ | 442/520 [44:06<04:50, 3.72s/it] 85%|████████▌ | 443/520 [44:09<04:48, 3.74s/it] {'loss': 1.3234, 'grad_norm': 0.0005843893815213663, 'learning_rate': 0.02824729170544457, 'epoch': 0.85} + 85%|████████▌ | 443/520 [44:09<04:48, 3.74s/it] 85%|████████▌ | 444/520 [44:13<04:44, 3.75s/it] {'loss': 1.288, 'grad_norm': 0.0005536989477624, 'learning_rate': 0.027532047797132864, 'epoch': 0.85} + 85%|████████▌ | 444/520 [44:13<04:44, 3.75s/it] 86%|████████▌ | 445/520 [44:17<04:42, 3.77s/it] {'loss': 1.2061, 'grad_norm': 0.0006331729550006071, 'learning_rate': 0.02682544768909717, 'epoch': 0.86} + 86%|████████▌ | 445/520 [44:17<04:42, 3.77s/it] 86%|████████▌ | 446/520 [44:21<04:36, 3.74s/it] {'loss': 1.4373, 'grad_norm': 0.000642829284552052, 'learning_rate': 0.026127518835674768, 'epoch': 0.86} + 86%|████████▌ | 446/520 [44:21<04:36, 3.74s/it] 86%|████████▌ | 
447/520 [44:24<04:31, 3.72s/it] {'loss': 1.2954, 'grad_norm': 0.0005162319530529507, 'learning_rate': 0.02543828835428899, 'epoch': 0.86} + 86%|████████▌ | 447/520 [44:24<04:31, 3.72s/it] 86%|████████▌ | 448/520 [44:28<04:26, 3.71s/it] {'loss': 1.2792, 'grad_norm': 0.0008422897415759623, 'learning_rate': 0.02475778302439524, 'epoch': 0.86} + 86%|████████▌ | 448/520 [44:28<04:26, 3.71s/it] 86%|████████▋ | 449/520 [44:32<04:22, 3.70s/it] {'loss': 1.4198, 'grad_norm': 0.000568870338171427, 'learning_rate': 0.02408602928644088, 'epoch': 0.86} + 86%|████████▋ | 449/520 [44:32<04:22, 3.70s/it] 87%|████████▋ | 450/520 [44:35<04:18, 3.70s/it] {'loss': 1.3316, 'grad_norm': 0.0006050084752382764, 'learning_rate': 0.023423053240837516, 'epoch': 0.87} + 87%|████████▋ | 450/520 [44:35<04:18, 3.70s/it] 87%|████████▋ | 451/520 [44:39<04:14, 3.68s/it] {'loss': 1.3292, 'grad_norm': 0.0007141872183347075, 'learning_rate': 0.022768880646947265, 'epoch': 0.87} + 87%|████████▋ | 451/520 [44:39<04:14, 3.68s/it] 87%|████████▋ | 452/520 [44:43<04:09, 3.68s/it] {'loss': 1.4395, 'grad_norm': 0.0005589216913593605, 'learning_rate': 0.022123536922081716, 'epoch': 0.87} + 87%|████████▋ | 452/520 [44:43<04:09, 3.68s/it] 87%|████████▋ | 453/520 [44:46<04:07, 3.69s/it] {'loss': 1.419, 'grad_norm': 0.0006403575076509997, 'learning_rate': 0.021487047140514248, 'epoch': 0.87} + 87%|████████▋ | 453/520 [44:46<04:07, 3.69s/it] 87%|████████▋ | 454/520 [44:50<04:03, 3.69s/it] {'loss': 1.2176, 'grad_norm': 0.00062089690144699, 'learning_rate': 0.02085943603250595, 'epoch': 0.87} + 87%|████████▋ | 454/520 [44:50<04:03, 3.69s/it] 88%|████████▊ | 455/520 [44:54<03:59, 3.68s/it] {'loss': 1.3723, 'grad_norm': 0.0006536079322805274, 'learning_rate': 0.020240727983344836, 'epoch': 0.88} + 88%|████████▊ | 455/520 [44:54<03:59, 3.68s/it] 88%|████████▊ | 456/520 [44:57<03:54, 3.67s/it] {'loss': 1.2824, 'grad_norm': 0.0005898413107686381, 'learning_rate': 0.019630947032398066, 'epoch': 0.88} + 88%|████████▊ | 456/520 [44:57<03:54, 3.67s/it] 88%|████████▊ | 457/520 [45:01<03:51, 3.67s/it] {'loss': 1.4085, 'grad_norm': 0.0006029795663914253, 'learning_rate': 0.019030116872178315, 'epoch': 0.88} + 88%|████████▊ | 457/520 [45:01<03:51, 3.67s/it] 88%|████████▊ | 458/520 [45:05<03:47, 3.67s/it] {'loss': 1.433, 'grad_norm': 0.00063189256994357, 'learning_rate': 0.018438260847422838, 'epoch': 0.88} + 88%|████████▊ | 458/520 [45:05<03:47, 3.67s/it] 88%|████████▊ | 459/520 [45:08<03:44, 3.68s/it] {'loss': 1.3525, 'grad_norm': 0.0006069372863759417, 'learning_rate': 0.01785540195418661, 'epoch': 0.88} + 88%|████████▊ | 459/520 [45:08<03:44, 3.68s/it] 88%|████████▊ | 460/520 [45:12<03:41, 3.69s/it] {'loss': 1.2254, 'grad_norm': 0.0005923628553927085, 'learning_rate': 0.017281562838948966, 'epoch': 0.88} + 88%|████████▊ | 460/520 [45:12<03:41, 3.69s/it] 89%|████████▊ | 461/520 [45:16<03:38, 3.70s/it] {'loss': 1.4891, 'grad_norm': 0.0006325633846403456, 'learning_rate': 0.016716765797733374, 'epoch': 0.89} + 89%|████████▊ | 461/520 [45:16<03:38, 3.70s/it] 89%|████████▉ | 462/520 [45:20<03:34, 3.70s/it] {'loss': 1.4858, 'grad_norm': 0.0005938810598239647, 'learning_rate': 0.0161610327752415, 'epoch': 0.89} + 89%|████████▉ | 462/520 [45:20<03:34, 3.70s/it] 89%|████████▉ | 463/520 [45:23<03:30, 3.69s/it] {'loss': 1.196, 'grad_norm': 0.0005974963472176447, 'learning_rate': 0.015614385364000227, 'epoch': 0.89} + 89%|████████▉ | 463/520 [45:23<03:30, 3.69s/it] 89%|████████▉ | 464/520 [45:27<03:26, 3.69s/it] {'loss': 1.3366, 'grad_norm': 0.0005645253657023102, 
'learning_rate': 0.01507684480352292, 'epoch': 0.89} + 89%|████████▉ | 464/520 [45:27<03:26, 3.69s/it] 89%|████████▉ | 465/520 [45:31<03:23, 3.69s/it] {'loss': 1.4447, 'grad_norm': 0.0005680692570890049, 'learning_rate': 0.014548431979484133, 'epoch': 0.89} + 89%|████████▉ | 465/520 [45:31<03:23, 3.69s/it] 90%|████████▉ | 466/520 [45:35<03:23, 3.76s/it] {'loss': 1.3373, 'grad_norm': 0.0005030746416678913, 'learning_rate': 0.014029167422908106, 'epoch': 0.9} + 90%|████████▉ | 466/520 [45:35<03:23, 3.76s/it] 90%|████████▉ | 467/520 [45:39<03:21, 3.81s/it] {'loss': 1.3652, 'grad_norm': 0.0005194149083621631, 'learning_rate': 0.013519071309370995, 'epoch': 0.9} + 90%|████████▉ | 467/520 [45:39<03:21, 3.81s/it] 90%|█████████ | 468/520 [45:42<03:19, 3.84s/it] {'loss': 1.3078, 'grad_norm': 0.0006579527227862476, 'learning_rate': 0.013018163458217075, 'epoch': 0.9} + 90%|█████████ | 468/520 [45:42<03:19, 3.84s/it] 90%|█████████ | 469/520 [45:46<03:17, 3.87s/it] {'loss': 1.3722, 'grad_norm': 0.0005634115202640272, 'learning_rate': 0.012526463331788501, 'epoch': 0.9} + 90%|█████████ | 469/520 [45:46<03:17, 3.87s/it] 90%|█████████ | 470/520 [45:50<03:14, 3.89s/it] {'loss': 1.2259, 'grad_norm': 0.0005854039045637916, 'learning_rate': 0.01204399003466941, 'epoch': 0.9} + 90%|█████████ | 470/520 [45:50<03:14, 3.89s/it] 91%|█████████ | 471/520 [45:54<03:11, 3.91s/it] {'loss': 1.2726, 'grad_norm': 0.0006656656108365568, 'learning_rate': 0.011570762312943295, 'epoch': 0.91} + 91%|█████████ | 471/520 [45:54<03:11, 3.91s/it] 91%|█████████ | 472/520 [45:58<03:07, 3.91s/it] {'loss': 1.2294, 'grad_norm': 0.0006565572890232414, 'learning_rate': 0.011106798553464803, 'epoch': 0.91} + 91%|█████████ | 472/520 [45:58<03:07, 3.91s/it] 91%|█████████ | 473/520 [46:02<03:04, 3.92s/it] {'loss': 1.3072, 'grad_norm': 0.0006028322572263376, 'learning_rate': 0.010652116783145482, 'epoch': 0.91} + 91%|█████████ | 473/520 [46:02<03:04, 3.92s/it] 91%|█████████ | 474/520 [46:06<03:00, 3.93s/it] {'loss': 1.4178, 'grad_norm': 0.0005201578153006478, 'learning_rate': 0.010206734668253059, 'epoch': 0.91} + 91%|█████████ | 474/520 [46:06<03:00, 3.93s/it] 91%|█████████▏| 475/520 [46:10<02:53, 3.87s/it] {'loss': 1.3089, 'grad_norm': 0.0006256216114114947, 'learning_rate': 0.009770669513725128, 'epoch': 0.91} + 91%|█████████▏| 475/520 [46:10<02:53, 3.87s/it] 92%|█████████▏| 476/520 [46:13<02:47, 3.81s/it] {'loss': 1.2834, 'grad_norm': 0.0006627310867336249, 'learning_rate': 0.009343938262496992, 'epoch': 0.92} + 92%|█████████▏| 476/520 [46:13<02:47, 3.81s/it] 92%|█████████▏| 477/520 [46:17<02:42, 3.78s/it] {'loss': 1.2699, 'grad_norm': 0.0006848685205215471, 'learning_rate': 0.008926557494843085, 'epoch': 0.92} + 92%|█████████▏| 477/520 [46:17<02:42, 3.78s/it] 92%|█████████▏| 478/520 [46:21<02:37, 3.75s/it] {'loss': 1.2135, 'grad_norm': 0.000573092937504486, 'learning_rate': 0.00851854342773295, 'epoch': 0.92} + 92%|█████████▏| 478/520 [46:21<02:37, 3.75s/it] 92%|█████████▏| 479/520 [46:25<02:32, 3.73s/it] {'loss': 1.3981, 'grad_norm': 0.0008389550953357667, 'learning_rate': 0.008119911914200972, 'epoch': 0.92} + 92%|█████████▏| 479/520 [46:25<02:32, 3.73s/it] 92%|█████████▏| 480/520 [46:28<02:28, 3.72s/it] {'loss': 1.4032, 'grad_norm': 0.0006353480466192032, 'learning_rate': 0.0077306784427305375, 'epoch': 0.92} + 92%|█████████▏| 480/520 [46:28<02:28, 3.72s/it] 92%|█████████▎| 481/520 [46:32<02:24, 3.71s/it] {'loss': 1.4136, 'grad_norm': 0.000632875171332906, 'learning_rate': 0.007350858136652261, 'epoch': 0.93} + 92%|█████████▎| 
481/520 [46:32<02:24, 3.71s/it] 93%|█████████▎| 482/520 [46:36<02:20, 3.70s/it] {'loss': 1.4179, 'grad_norm': 0.0006288830940933863, 'learning_rate': 0.006980465753556375, 'epoch': 0.93} + 93%|█████████▎| 482/520 [46:36<02:20, 3.70s/it] 93%|█████████▎| 483/520 [46:39<02:16, 3.69s/it] {'loss': 1.3007, 'grad_norm': 0.000738432012960674, 'learning_rate': 0.006619515684719163, 'epoch': 0.93} + 93%|█████████▎| 483/520 [46:39<02:16, 3.69s/it] 93%|█████████▎| 484/520 [46:43<02:13, 3.70s/it] {'loss': 1.3039, 'grad_norm': 0.0006485664924886888, 'learning_rate': 0.006268021954544095, 'epoch': 0.93} + 93%|█████████▎| 484/520 [46:43<02:13, 3.70s/it] 93%|█████████▎| 485/520 [46:47<02:09, 3.70s/it] {'loss': 1.2553, 'grad_norm': 0.0006467538369697224, 'learning_rate': 0.0059259982200166594, 'epoch': 0.93} + 93%|█████████▎| 485/520 [46:47<02:09, 3.70s/it] 93%|█████████▎| 486/520 [46:50<02:05, 3.69s/it] {'loss': 1.3844, 'grad_norm': 0.0005779848802971149, 'learning_rate': 0.005593457770173865, 'epoch': 0.93} + 93%|█████████▎| 486/520 [46:50<02:05, 3.69s/it] 94%|█████████▎| 487/520 [46:54<02:01, 3.68s/it] {'loss': 1.2314, 'grad_norm': 0.0005243592269696154, 'learning_rate': 0.005270413525587908, 'epoch': 0.94} + 94%|█████████▎| 487/520 [46:54<02:01, 3.68s/it] 94%|█████████▍| 488/520 [46:58<01:58, 3.71s/it] {'loss': 1.1704, 'grad_norm': 0.0005644192695431304, 'learning_rate': 0.004956878037864043, 'epoch': 0.94} + 94%|█████████▍| 488/520 [46:58<01:58, 3.71s/it] 94%|█████████▍| 489/520 [47:01<01:54, 3.70s/it] {'loss': 1.4077, 'grad_norm': 0.0005423171392775805, 'learning_rate': 0.004652863489153086, 'epoch': 0.94} + 94%|█████████▍| 489/520 [47:01<01:54, 3.70s/it] 94%|█████████▍| 490/520 [47:05<01:52, 3.76s/it] {'loss': 1.2914, 'grad_norm': 0.0005945821520166193, 'learning_rate': 0.004358381691677932, 'epoch': 0.94} + 94%|█████████▍| 490/520 [47:05<01:52, 3.76s/it] 94%|█████████▍| 491/520 [47:09<01:49, 3.79s/it] {'loss': 1.2511, 'grad_norm': 0.0006140586393744285, 'learning_rate': 0.004073444087274669, 'epoch': 0.94} + 94%|█████████▍| 491/520 [47:09<01:49, 3.79s/it] 95%|█████████▍| 492/520 [47:13<01:47, 3.82s/it] {'loss': 1.3752, 'grad_norm': 0.0006307783618388287, 'learning_rate': 0.003798061746947995, 'epoch': 0.95} + 95%|█████████▍| 492/520 [47:13<01:47, 3.82s/it] 95%|█████████▍| 493/520 [47:17<01:43, 3.84s/it] {'loss': 1.4559, 'grad_norm': 0.0006228696658118236, 'learning_rate': 0.0035322453704410284, 'epoch': 0.95} + 95%|█████████▍| 493/520 [47:17<01:43, 3.84s/it] 95%|█████████▌| 494/520 [47:21<01:39, 3.85s/it] {'loss': 1.3079, 'grad_norm': 0.0005503178921040365, 'learning_rate': 0.0032760052858197275, 'epoch': 0.95} + 95%|█████████▌| 494/520 [47:21<01:39, 3.85s/it] 95%|█████████▌| 495/520 [47:25<01:36, 3.85s/it] {'loss': 1.271, 'grad_norm': 0.0005946108998201476, 'learning_rate': 0.0030293514490713214, 'epoch': 0.95} + 95%|█████████▌| 495/520 [47:25<01:36, 3.85s/it] 95%|█████████▌| 496/520 [47:29<01:32, 3.86s/it] {'loss': 1.2099, 'grad_norm': 0.0006388464768822222, 'learning_rate': 0.0027922934437178693, 'epoch': 0.95} + 95%|█████████▌| 496/520 [47:29<01:32, 3.86s/it] 96%|█████████▌| 497/520 [47:32<01:28, 3.86s/it] {'loss': 1.3272, 'grad_norm': 0.0005541351743689792, 'learning_rate': 0.002564840480443503, 'epoch': 0.96} + 96%|█████████▌| 497/520 [47:32<01:28, 3.86s/it] 96%|█████████▌| 498/520 [47:36<01:25, 3.87s/it] {'loss': 1.2866, 'grad_norm': 0.0007115464090796896, 'learning_rate': 0.0023470013967367975, 'epoch': 0.96} + 96%|█████████▌| 498/520 [47:36<01:25, 3.87s/it] 96%|█████████▌| 499/520 
[47:40<01:21, 3.86s/it] {'loss': 1.4857, 'grad_norm': 0.000549795925269601, 'learning_rate': 0.0021387846565474045, 'epoch': 0.96} + 96%|█████████▌| 499/520 [47:40<01:21, 3.86s/it] 96%|█████████▌| 500/520 [47:44<01:16, 3.84s/it] {'loss': 1.4026, 'grad_norm': 0.0006412118023711056, 'learning_rate': 0.0019401983499569841, 'epoch': 0.96} + 96%|█████████▌| 500/520 [47:44<01:16, 3.84s/it] 96%|█████████▋| 501/520 [47:48<01:13, 3.85s/it] {'loss': 1.3886, 'grad_norm': 0.0006088322155607744, 'learning_rate': 0.0017512501928650948, 'epoch': 0.96} + 96%|█████████▋| 501/520 [47:48<01:13, 3.85s/it] 97%|█████████▋| 502/520 [47:52<01:09, 3.85s/it] {'loss': 1.3043, 'grad_norm': 0.0005702184527337724, 'learning_rate': 0.001571947526689349, 'epoch': 0.97} + 97%|█████████▋| 502/520 [47:52<01:09, 3.85s/it] 97%|█████████▋| 503/520 [47:56<01:05, 3.86s/it] {'loss': 1.3566, 'grad_norm': 0.0005776181170048267, 'learning_rate': 0.001402297318080059, 'epoch': 0.97} + 97%|█████████▋| 503/520 [47:56<01:05, 3.86s/it] 97%|█████████▋| 504/520 [47:59<01:01, 3.87s/it] {'loss': 1.3044, 'grad_norm': 0.0006649595379931442, 'learning_rate': 0.0012423061586496476, 'epoch': 0.97} + 97%|█████████▋| 504/520 [47:59<01:01, 3.87s/it] 97%|█████████▋| 505/520 [48:03<00:58, 3.88s/it] {'loss': 1.3465, 'grad_norm': 0.0007378915149585497, 'learning_rate': 0.0010919802647165466, 'epoch': 0.97} + 97%|█████████▋| 505/520 [48:03<00:58, 3.88s/it] 97%|█████████▋| 506/520 [48:07<00:54, 3.89s/it] {'loss': 1.2554, 'grad_norm': 0.0005848892132518819, 'learning_rate': 0.0009513254770636137, 'epoch': 0.97} + 97%|█████████▋| 506/520 [48:07<00:54, 3.89s/it] 98%|█████████▊| 507/520 [48:11<00:50, 3.89s/it] {'loss': 1.5173, 'grad_norm': 0.0005393101960651617, 'learning_rate': 0.0008203472607112294, 'epoch': 0.97} + 98%|█████████▊| 507/520 [48:11<00:50, 3.89s/it] 98%|█████████▊| 508/520 [48:15<00:46, 3.89s/it] {'loss': 1.3841, 'grad_norm': 0.0005483544368037071, 'learning_rate': 0.0006990507047049677, 'epoch': 0.98} + 98%|█████████▊| 508/520 [48:15<00:46, 3.89s/it] 98%|█████████▊| 509/520 [48:19<00:42, 3.88s/it] {'loss': 1.3464, 'grad_norm': 0.0005799026779979648, 'learning_rate': 0.0005874405219177814, 'epoch': 0.98} + 98%|█████████▊| 509/520 [48:19<00:42, 3.88s/it] 98%|█████████▊| 510/520 [48:23<00:38, 3.89s/it] {'loss': 1.3024, 'grad_norm': 0.0007577848582532217, 'learning_rate': 0.00048552104886703806, 'epoch': 0.98} + 98%|█████████▊| 510/520 [48:23<00:38, 3.89s/it] 98%|█████████▊| 511/520 [48:27<00:34, 3.89s/it] {'loss': 1.2683, 'grad_norm': 0.0006576935425467449, 'learning_rate': 0.00039329624554584885, 'epoch': 0.98} + 98%|█████████▊| 511/520 [48:27<00:34, 3.89s/it] 98%|█████████▊| 512/520 [48:31<00:31, 3.88s/it] {'loss': 1.1537, 'grad_norm': 0.0006203741362529577, 'learning_rate': 0.0003107696952694139, 'epoch': 0.98} + 98%|█████████▊| 512/520 [48:31<00:31, 3.88s/it] 99%|█████████▊| 513/520 [48:34<00:27, 3.88s/it] {'loss': 1.3592, 'grad_norm': 0.0006615571373479184, 'learning_rate': 0.00023794460453555044, 'epoch': 0.99} + 99%|█████████▊| 513/520 [48:34<00:27, 3.88s/it] 99%|█████████▉| 514/520 [48:38<00:23, 3.88s/it] {'loss': 1.3393, 'grad_norm': 0.0005486019762485832, 'learning_rate': 0.00017482380290034794, 'epoch': 0.99} + 99%|█████████▉| 514/520 [48:38<00:23, 3.88s/it] 99%|█████████▉| 515/520 [48:42<00:19, 3.87s/it] {'loss': 1.4052, 'grad_norm': 0.0007547781917310577, 'learning_rate': 0.00012140974286808937, 'epoch': 0.99} + 99%|█████████▉| 515/520 [48:42<00:19, 3.87s/it] 99%|█████████▉| 516/520 [48:46<00:15, 3.87s/it] {'loss': 1.2828, 
'grad_norm': 0.0005989339886666185, 'learning_rate': 7.770449979593863e-05, 'epoch': 0.99} + 99%|█████████▉| 516/520 [48:46<00:15, 3.87s/it] 99%|█████████▉| 517/520 [48:50<00:11, 3.86s/it] {'loss': 1.4206, 'grad_norm': 0.0005870040149999795, 'learning_rate': 4.370977181339386e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [48:50<00:11, 3.86s/it] 100%|█████████▉| 518/520 [48:54<00:07, 3.84s/it] {'loss': 1.3042, 'grad_norm': 0.0005731126584649322, 'learning_rate': 1.9426879756284654e-05, 'epoch': 1.0} + 100%|█████████▉| 518/520 [48:54<00:07, 3.84s/it] 100%|█████████▉| 519/520 [48:58<00:03, 3.86s/it] {'loss': 1.3851, 'grad_norm': 0.0006914346091343445, 'learning_rate': 4.856767115452021e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [48:58<00:03, 3.86s/it] 100%|██████████| 520/520 [49:02<00:00, 4.10s/it] {'loss': 1.4195, 'grad_norm': 0.0005934277525024359, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [49:02<00:00, 4.10s/it] {'train_runtime': 2942.7859, 'train_samples_per_second': 22.607, 'train_steps_per_second': 0.177, 'train_loss': 1.3932792867605504, 'epoch': 1.0} + 100%|██████████| 520/520 [49:02<00:00, 4.10s/it] 100%|██████████| 520/520 [49:02<00:00, 5.66s/it] +[2025-10-09 09:43:19,282] [INFO] [launch.py:348:main] Process 1025828 exits successfully. +[2025-10-09 09:43:19,283] [INFO] [launch.py:348:main] Process 1025824 exits successfully. +[2025-10-09 09:43:19,283] [INFO] [launch.py:348:main] Process 1025826 exits successfully. +[2025-10-09 09:43:19,284] [INFO] [launch.py:348:main] Process 1025823 exits successfully. +[2025-10-09 09:43:19,284] [INFO] [launch.py:348:main] Process 1025825 exits successfully. +[2025-10-09 09:43:20,285] [INFO] [launch.py:348:main] Process 1025822 exits successfully. +[2025-10-09 09:43:20,286] [INFO] [launch.py:348:main] Process 1025827 exits successfully. +[2025-10-09 09:43:23,289] [INFO] [launch.py:348:main] Process 1025821 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_5e-1_connector-3.0_0.5_5e-1_ablation_20251009_085249.log +Timestamp: 2025-10-09 09:43:25 +===================================== diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log new file mode 100644 index 0000000000000000000000000000000000000000..0e6358b6acaa1da128f4599c5eaca0eadd146edd --- /dev/null +++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log +Timestamp: 2025-10-09 06:23:42 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
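The training summary above can be cross-checked against the launch flags: with a per-device batch size of 4, gradient accumulation of 4, and 8 local ranks, each optimizer step consumes 4 × 4 × 8 = 128 samples, and 520 such steps line up with `--train_data_ratio 0.1` of the llava_v1_5_mix665k mix. A minimal sanity check in plain Python, with all figures copied from the log (the ~665k dataset size is an assumption based on the file name, not stated in the log):

```python
# Figures taken from the training summary and launch flags above.
per_device_batch = 4
grad_accum = 4
world_size = 8
steps = 520
train_runtime = 2942.7859        # seconds
samples_per_second = 22.607

effective_batch = per_device_batch * grad_accum * world_size
print(effective_batch)                        # 128 samples per optimizer step
print(effective_batch * steps)                # 66560 samples of step capacity
print(samples_per_second * train_runtime)     # ~66528 samples actually seen
print(steps / train_runtime)                  # ~0.1767, matching train_steps_per_second 0.177
# ~66.5k samples is consistent with --train_data_ratio 0.1 of a ~665k-sample mix.
```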
diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log
new file mode 100644
index 0000000000000000000000000000000000000000..0e6358b6acaa1da128f4599c5eaca0eadd146edd
--- /dev/null
+++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log
@@ -0,0 +1,2312 @@
+==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log
+Timestamp: 2025-10-09 06:23:42
+=====================================
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-09 06:23:44,944] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:23:47,708] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only.
+[2025-10-09 06:23:47,709] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 7 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 7 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
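The `--world_info` argument in the launcher command above is base64-encoded JSON naming the local GPUs; decoding it reproduces the WORLD INFO DICT that launch.py prints just below:

```python
import base64
import json

# Blob copied verbatim from the runner command above.
world_info = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
print(json.loads(base64.b64decode(world_info)))
# {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
```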
+[2025-10-09 06:23:50,327] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:23:51,367] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5
+[2025-10-09 06:23:51,367] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth
+[2025-10-09 06:23:51,367] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}
+[2025-10-09 06:23:51,367] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0
+[2025-10-09 06:23:51,367] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]})
+[2025-10-09 06:23:51,367] [INFO] [launch.py:163:main] dist_world_size=8
+[2025-10-09 06:23:51,367] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
+[2025-10-09 06:23:51,369] [INFO] [launch.py:253:main] process 847286 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+[2025-10-09 06:23:51,371] [INFO] [launch.py:253:main] process 847287 spawned with command:
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:23:51,373] [INFO] [launch.py:253:main] process 847288 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', 
'/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:23:51,376] [INFO] [launch.py:253:main] process 847289 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', 
'--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:23:51,378] [INFO] [launch.py:253:main] process 847290 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:23:51,380] [INFO] [launch.py:253:main] process 847291 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', 
'/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:23:51,382] [INFO] [launch.py:253:main] process 847292 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', 
'1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:23:51,384] [INFO] [launch.py:253:main] process 847293 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', 
'--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 06:23:58,044] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:23:58,127] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:23:58,346] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:23:58,385] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:23:58,388] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:23:58,388] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:23:58,388] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:23:58,388] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:23:58,467] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 06:23:58,539] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 06:23:58,751] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 06:23:58,790] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 06:23:58,791] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 06:23:58,793] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 06:23:58,793] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-09 06:23:58,796] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 06:23:58,798] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
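Both the `llm` and `connector` entries above use `mask_type` `soft` with temperature 0.5, and the argv sets `init_mean` 3.0. The exact parameterization lives in tinyllava's masking code; a minimal sketch assuming the common temperature-scaled sigmoid gate these settings suggest:

```python
import torch

# Assumed form of a *soft* mask gate; not the repo's actual implementation.
def soft_mask(scores: torch.Tensor, temperature: float = 0.5) -> torch.Tensor:
    return torch.sigmoid(scores / temperature)

scores = torch.full((4, 4), 3.0)           # init_mean = 3.0, as logged
gate = soft_mask(scores, temperature=0.5)  # sigmoid(3.0 / 0.5) = sigmoid(6)
print(gate[0, 0].item())                   # ~0.9975: masks start nearly open
```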
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
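The `text_config` above is enough to sanity-check the model size. A back-of-the-envelope count from those logged fields (ignoring biases and norm weights, and assuming the usual GQA/gated-MLP layout for Qwen2) lands near the advertised 0.5B:

```python
# Rough parameter count from the logged text_config (illustrative arithmetic).
hidden, inter, layers = 896, 4864, 24
heads, kv_heads, vocab = 14, 2, 151936
head_dim = hidden // heads                                       # 64
attn = hidden * hidden * 2 + hidden * (kv_heads * head_dim) * 2  # q,o + k,v (GQA)
mlp = hidden * inter * 3                                         # gate, up, down
embed = vocab * hidden                                           # tied with lm_head
total = layers * (attn + mlp) + embed
print(f"{total / 1e9:.2f}B parameters")                          # ~0.49B, i.e. Qwen2.5-0.5B
```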
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:847286:847286 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:847286:847286 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:847286:847286 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:847286:847286 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:847286:847286 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:847286:847286 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
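The Flash Attention 2.0 warning above is expected in this setup: under DeepSpeed ZeRO-3 the weights are instantiated on CPU and moved to GPU later by the trainer, so FA2 is requested before the model is on a device. A minimal sketch of the init order the warning refers to, using the standard transformers API (flash-attn must be installed for this to run):

```python
import torch
from transformers import AutoModelForCausalLM

# Request FA2 at load time; the model starts on CPU, which triggers
# the warning seen in the log. It is benign as long as the model is
# moved to GPU before the first forward pass.
model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    attn_implementation="flash_attention_2",
    torch_dtype=torch.bfloat16,
)
model.to("cuda")  # FA2 kernels only run on GPU, as the warning says
```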
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO Using network Socket
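NET/IB found no InfiniBand device, so NCCL falls back to plain TCP sockets on eth0, with the interface pinned via NCCL_SOCKET_IFNAME. These are standard NCCL environment knobs; for example, set before launch:

```python
import os

# Standard NCCL environment variables (values mirror what the log shows).
os.environ.setdefault("NCCL_SOCKET_IFNAME", "eth")  # pin the NIC prefix
os.environ.setdefault("NCCL_DEBUG", "INFO")         # produces lines like those above
```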
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO ncclCommInitRank comm 0x556cc46e53c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x730d590a065c5bdd - Init START
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO comm 0x556cc46e53c0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
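The channel dump encodes a simple single-node ring over the 8 local ranks. A one-liner check that the logged order implies each rank forwarding to (rank + 1) mod 8:

```python
# Ring neighbors implied by "Channel 00/24 : 0 1 2 3 4 5 6 7".
nranks = 8
ring = [(r, (r + 1) % nranks) for r in range(nranks)]
print(ring)  # [(0, 1), (1, 2), ..., (7, 0)] matches the Channel connects below
```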
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
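A hedged reading of the Trees notation above (the children->rank->parent interpretation is assumed from the structure of the dump, which matches the chain topology implied by the ring):

```python
# Parse one Trees entry: "children/.../children->rank->parent".
entry = "1/-1/-1->0->-1"
children, rank, parent = entry.split("->")
print([int(c) for c in children.split("/")], int(rank), int(parent))
# [1, -1, -1] 0 -1: rank 0 is the tree root, with rank 1 as its only child.
```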
+ywang29-vrdb-test1-worker-0:847293:848892 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:847293:848892 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847289:848889 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:847289:848889 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:847289:848889 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:847289:848889 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:847290:848890 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:847290:848890 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:847290:848890 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:847293:848892 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:847293:848892 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:847293:848892 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:847290:848890 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:847291:848887 [5] 
NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:847293:848892 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:847293:848892 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:847291:848887 [5] NCCL INFO ncclCommInitRank comm 0x562ffae17e80 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x730d590a065c5bdd - Init COMPLETE +ywang29-vrdb-test1-worker-0:847292:848888 [6] NCCL INFO ncclCommInitRank comm 0x55d172cfeb50 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x730d590a065c5bdd - Init COMPLETE +ywang29-vrdb-test1-worker-0:847290:848890 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:847293:848892 [7] NCCL INFO ncclCommInitRank comm 0x561e88930a00 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x730d590a065c5bdd - Init COMPLETE +ywang29-vrdb-test1-worker-0:847290:848890 [4] NCCL INFO ncclCommInitRank comm 0x55e427917b40 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x730d590a065c5bdd - Init COMPLETE +ywang29-vrdb-test1-worker-0:847289:848889 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:847289:848889 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:847289:848889 [3] NCCL INFO ncclCommInitRank comm 0x55ff82e47df0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x730d590a065c5bdd - Init COMPLETE +ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:847288:848893 [2] NCCL INFO ncclCommInitRank comm 0x5654e1e2b4c0 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x730d590a065c5bdd - Init COMPLETE +ywang29-vrdb-test1-worker-0:847287:848891 [1] NCCL INFO ncclCommInitRank comm 0x56243fe97d10 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x730d590a065c5bdd - Init COMPLETE +ywang29-vrdb-test1-worker-0:847286:848886 [0] NCCL INFO ncclCommInitRank comm 0x556cc46e53c0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x730d590a065c5bdd - Init COMPLETE +[2025-10-09 06:24:43,040] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 
'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 
'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores',
'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 
'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 
'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 
'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
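The `*.scores` tensors listed in the warning above are the per-weight mask logits that the supermask layers attach to every attention and MLP projection; they do not exist in the pretrained checkpoint, so the loader initializes them fresh and emits the "newly initialized" notice once per rank. A minimal sketch of such a layer, assuming the scores act as a sigmoid soft mask over a frozen pretrained weight (the class and argument names below are illustrative, not the repository's actual implementation):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftSupermaskLinear(nn.Linear):
    """Illustrative sketch: frozen pretrained weight modulated by learnable scores."""

    def __init__(self, in_features, out_features, bias=True,
                 temperature=0.3, init_mean=1.0):
        super().__init__(in_features, out_features, bias=bias)
        self.weight.requires_grad = False   # pretrained weight stays frozen
        self.temperature = temperature
        # The only trainable tensor; it is absent from the base checkpoint,
        # hence the "newly initialized" warning at load time.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)
```

Because only `scores` would require gradients under this scheme, it is consistent with the trainable-parameter report later in this log, which counts exactly one score tensor per masked projection.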
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-09 06:24:44,795] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
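Per the messages above, each rank assembles the multimodal model from three sub-checkpoints under the pretrain directory: a Hugging Face language model, a SigLIP vision tower, and a bare state dict for the MLP connector. A rough sketch of that assembly under those assumptions (the plain `nn.Sequential` stand-in and the `strict=False` handling are guesses, not the project's exact code):

```python
import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, SiglipVisionModel

pretrain_dir = ("/nfs/ywang29/TinyLLaVA/checkpoints/"
                "tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain")

# Language model and vision tower are saved as full HF checkpoints in subfolders.
language_model = AutoModelForCausalLM.from_pretrained(f"{pretrain_dir}/language_model")
vision_tower = SiglipVisionModel.from_pretrained(f"{pretrain_dir}/vision_tower")

# The connector is a raw state dict; dimensions match the printed architecture
# (SigLIP hidden size 1152 -> Qwen2.5-0.5B hidden size 896).
connector = nn.Sequential(nn.Linear(1152, 896), nn.GELU(), nn.Linear(896, 896))
state = torch.load(f"{pretrain_dir}/connector/pytorch_model.bin", map_location="cpu")
connector.load_state_dict(state, strict=False)  # tolerate key-prefix/score mismatches
```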
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training 
init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init 
connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-09 06:24:57,917 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-09 06:24:57,922 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters 
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters 
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:847293:853794 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:847286:853788 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:847293:853794 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:847286:853788 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:847290:853790 [4] NCCL INFO 
[NCCL topology setup, ranks 0-7 on ywang29-vrdb-test1-worker-0 (pids 847286-847293): 24 channels enumerated over ring 0 1 2 3 4 5 6 7; trees (k+1)/-1/-1->k->(k-1) on all 24 channels with rank 0 as root; P2P chunksize set to 524288; forward-ring links k[k] -> k+1[k+1] and 7[7] -> 0[0] plus reverse links k[k] -> k-1[k-1] opened on channels 00-23, all via P2P/CUMEM/read; every rank reports "Connected all rings" and "Connected all trees"; threadThresholds 8/8/64 | 64/8/64 | 512 | 512; 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer.]
+ywang29-vrdb-test1-worker-0:847293:853794 [7] NCCL INFO ncclCommInitRank comm 0x7f727006b240 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc0b4d1098a0e64cd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:847287:853792 [1] NCCL INFO ncclCommInitRank comm 0x7ff0d406b1c0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc0b4d1098a0e64cd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:847290:853790 [4] NCCL INFO ncclCommInitRank comm 0x7f1a6006b840 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc0b4d1098a0e64cd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:847286:853788 [0] NCCL INFO ncclCommInitRank comm 0x7f5c4006b920 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc0b4d1098a0e64cd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:847289:853791 [3] NCCL INFO ncclCommInitRank comm 0x7f100006bcd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc0b4d1098a0e64cd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:847288:853793 [2] NCCL INFO ncclCommInitRank comm 0x7f6db406b010 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc0b4d1098a0e64cd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:847291:853789 [5] NCCL INFO ncclCommInitRank comm 0x7f211406b6b0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc0b4d1098a0e64cd - Init COMPLETE
+ywang29-vrdb-test1-worker-0:847292:853795 [6] NCCL INFO ncclCommInitRank comm 0x7f123806ae80 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc0b4d1098a0e64cd - Init COMPLETE
+ 0%| | 1/520 [00:28<4:09:57, 28.90s/it] {'loss': 2.0453, 'grad_norm': 0.0048341062659359715, 'learning_rate': 0.4375, 'epoch': 0.0}
+ 0%| | 2/520 [00:32<2:02:31, 14.19s/it] {'loss': 2.0549, 'grad_norm': 0.005249259221224586, 'learning_rate': 0.875, 'epoch': 0.0}
+ 1%| | 3/520 [00:36<1:21:41, 9.48s/it] {'loss': 1.6875, 'grad_norm': 0.0025537936159355924, 'learning_rate': 1.3125, 'epoch': 0.01}
+ 1%| | 4/520 [00:40<1:02:23, 7.25s/it] {'loss': 1.7629, 'grad_norm': 0.005916696508590636, 'learning_rate': 1.75, 'epoch': 0.01}
+ 1%| | 5/520 [00:44<51:44, 6.03s/it] {'loss': 5.8371, 'grad_norm': 0.3553202681030835, 'learning_rate': 2.1875, 'epoch': 0.01}
+ 1%| | 6/520 [00:48<45:18, 5.29s/it] {'loss': 7.541, 'grad_norm': 0.3923344458061372, 'learning_rate': 2.625, 'epoch': 0.01}
+ 1%|▏ | 7/520 [00:52<41:15, 4.83s/it] {'loss': 16.3015, 'grad_norm': 0.978888900246237, 'learning_rate': 3.0625, 'epoch': 0.01}
+ 2%|▏ | 8/520 [00:56<40:12, 4.71s/it] {'loss': 17.5403, 'grad_norm': 1.1277912720967218, 'learning_rate': 3.5, 'epoch': 0.02}
+ 2%|▏ | 9/520 [01:00<39:14, 4.61s/it] {'loss': 12.9121, 'grad_norm': 0.06543973395654913, 'learning_rate': 3.9375, 'epoch': 0.02}
+ 2%|▏ | 10/520 [01:04<37:05, 4.36s/it] {'loss': 11.7563, 'grad_norm': 0.02175881462401668, 'learning_rate': 4.375, 'epoch': 0.02}
+ 2%|▏ | 11/520 [01:08<35:56, 4.24s/it] {'loss': 11.3723, 'grad_norm': 0.011635558156010042, 'learning_rate': 4.8125, 'epoch': 0.02}
+ 2%|▏ | 12/520 [01:12<34:54, 4.12s/it] {'loss': 11.1113, 'grad_norm': 0.005357750882086769, 'learning_rate': 5.25, 'epoch': 0.02}
+[2025-10-09 06:26:19,686] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+ 2%|▎ | 13/520 [01:17<35:53, 4.25s/it] {'loss': 9.8194, 'grad_norm': 0.004429801223025109, 'learning_rate': 5.6875, 'epoch': 0.03}
+ 3%|▎ | 14/520 [01:20<34:50, 4.13s/it] {'loss': 10.0818, 'grad_norm': 0.0027895286545571813, 'learning_rate': 6.125, 'epoch': 0.03}
+ 3%|▎ | 15/520 [01:24<34:10, 4.06s/it] {'loss': 10.6376, 'grad_norm': 0.002384564827354705, 'learning_rate': 6.5625, 'epoch': 0.03}
+ 3%|▎ | 16/520 [01:28<33:28, 3.99s/it] {'loss': 10.7078, 'grad_norm': 0.001307782373374703, 'learning_rate': 7.0, 'epoch': 0.03}
+ 3%|▎ | 17/520 [01:32<33:05, 3.95s/it] {'loss': 9.8656, 'grad_norm': 0.0009157570580612223, 'learning_rate': 6.9999320052603835, 'epoch': 0.03}
+ 3%|▎ | 18/520 [01:36<32:49, 3.92s/it] {'loss': 9.361, 'grad_norm': 0.001021273329012096, 'learning_rate': 6.999728023683412, 'epoch': 0.03}
+ 4%|▎ | 19/520 [01:40<32:45, 3.92s/it] {'loss': 10.4628, 'grad_norm': 0.0004175208272890833, 'learning_rate': 6.999388063194613, 'epoch': 0.04}
+ 4%|▍ | 20/520 [01:44<32:32, 3.91s/it] {'loss': 9.3491, 'grad_norm': 0.00040223164955017837, 'learning_rate': 6.998912137002857, 'epoch': 0.04}
+ 4%|▍ | 21/520 [01:48<32:30, 3.91s/it] {'loss': 10.4408, 'grad_norm': 0.00034834848769471546, 'learning_rate': 6.998300263599846, 'epoch': 0.04}
+ 4%|▍ | 22/520 [01:51<32:19, 3.89s/it] {'loss': 9.5826, 'grad_norm': 0.00035182339390620764, 'learning_rate': 6.997552466759395, 'epoch': 0.04}
+ 4%|▍ | 23/520 [01:55<31:50, 3.84s/it] {'loss': 9.4037, 'grad_norm': 0.0003152194202537533, 'learning_rate': 6.996668775536502, 'epoch': 0.04}
+ 5%|▍ | 24/520 [01:59<31:21, 3.79s/it] {'loss': 10.1489, 'grad_norm': 0.0002531845232727852, 'learning_rate': 6.995649224266228, 'epoch': 0.05}
+ 5%|▍ | 25/520 [02:03<31:16, 3.79s/it] {'loss': 9.6818, 'grad_norm': 0.0003396356102859581, 'learning_rate': 6.994493852562358, 'epoch': 0.05}
+ 5%|▌ | 26/520 [02:07<31:29, 3.82s/it] {'loss': 9.6185, 'grad_norm': 0.00021160160695897463, 'learning_rate': 6.993202705315862, 'epoch': 0.05}
+ 5%|▌ | 27/520 [02:10<31:33, 3.84s/it] {'loss': 9.2261, 'grad_norm': 0.000225311439454736, 'learning_rate': 6.991775832693151, 'epoch': 0.05}
+ 5%|▌ | 28/520 [02:14<31:30, 3.84s/it] {'loss': 9.21, 'grad_norm': 0.0002132870285062051, 'learning_rate': 6.99021329013413, 'epoch': 0.05}
+ 6%|▌ | 29/520 [02:18<31:33, 3.86s/it] {'loss': 9.0572, 'grad_norm': 0.0002561842801119871, 'learning_rate': 6.988515138350043, 'epoch': 0.06}
+ 6%|▌ | 30/520 [02:22<31:36, 3.87s/it] {'loss': 10.148, 'grad_norm': 0.00023986587887247902, 'learning_rate': 6.9866814433211095, 'epoch': 0.06}
+ 6%|▌ | 31/520 [02:26<31:38, 3.88s/it] {'loss': 8.8319, 'grad_norm': 0.000282446160833904, 'learning_rate': 6.984712276293968, 'epoch': 0.06}
+ 6%|▌ | 32/520 [02:30<31:33, 3.88s/it] {'loss': 11.0891, 'grad_norm': 0.0002771905110779483, 'learning_rate': 6.982607713778905, 'epoch': 0.06}
+ 6%|▋ | 33/520 [02:34<31:28, 3.88s/it] {'loss': 9.4573, 'grad_norm': 0.00018798702191891685, 'learning_rate': 6.980367837546879, 'epoch': 0.06}
+ 7%|▋ | 34/520 [02:38<31:22, 3.87s/it] {'loss': 9.1653, 'grad_norm': 0.00019530867506975318, 'learning_rate': 6.9779927346263495, 'epoch': 0.07}
+ 7%|▋ | 35/520 [02:41<31:19, 3.88s/it] {'loss': 9.3185, 'grad_norm': 0.00013394561098858439, 'learning_rate': 6.975482497299888, 'epoch': 0.07}
+ 7%|▋ | 36/520 [02:45<31:14, 3.87s/it] {'loss': 9.8632, 'grad_norm': 0.00020811588569425663, 'learning_rate': 6.972837223100603, 'epoch': 0.07}
+ 7%|▋ | 37/520 [02:49<31:14, 3.88s/it] {'loss': 10.0604, 'grad_norm': 0.00013471626124572617, 'learning_rate': 6.9700570148083365, 'epoch': 0.07}
+ 7%|▋ | 38/520 [02:53<30:45, 3.83s/it] {'loss': 9.8418, 'grad_norm': 0.00020400342270181758, 'learning_rate': 6.9671419804456844, 'epoch': 0.07}
+ 8%|▊ | 39/520 [02:57<30:23, 3.79s/it] {'loss': 9.2111, 'grad_norm': 0.0001322634206155229, 'learning_rate': 6.964092233273791, 'epoch': 0.07}
+ 8%|▊ | 40/520 [03:00<30:07, 3.76s/it] {'loss': 9.3115, 'grad_norm': 0.00018269796335148564, 'learning_rate': 6.960907891787949, 'epoch': 0.08}
+ 8%|▊ | 41/520 [03:04<29:52, 3.74s/it] {'loss': 9.419, 'grad_norm': 0.00011352542560854919, 'learning_rate': 6.957589079713001, 'epoch': 0.08}
+ 8%|▊ | 42/520 [03:08<29:36, 3.72s/it] {'loss': 9.9004, 'grad_norm': 0.00012041947408508151, 'learning_rate': 6.954135925998524, 'epoch': 0.08}
+ 8%|▊ | 43/520 [03:11<29:32, 3.72s/it] {'loss': 10.3416, 'grad_norm': 0.00012178369627043024, 'learning_rate': 6.950548564813825, 'epoch': 0.08}
+ 8%|▊ | 44/520 [03:15<29:24, 3.71s/it] {'loss': 10.3528, 'grad_norm': 0.00011267002190169879, 'learning_rate': 6.946827135542728, 'epoch': 0.08}
+ 9%|▊ | 45/520 [03:19<29:15, 3.70s/it] {'loss': 9.0962, 'grad_norm': 0.00013111332981831809, 'learning_rate': 6.942971782778154, 'epoch': 0.09}
+ 9%|▉ | 46/520 [03:23<29:29, 3.73s/it] {'loss': 10.9147, 'grad_norm': 0.00017718115672544528, 'learning_rate': 6.93898265631651, 'epoch': 0.09}
+ 9%|▉ | 47/520 [03:26<29:27, 3.74s/it] {'loss': 9.5134, 'grad_norm': 0.00025917779058437024, 'learning_rate': 6.934859911151857, 'epoch': 0.09}
+ 9%|▉ | 48/520 [03:30<29:21, 3.73s/it] {'loss': 9.26, 'grad_norm': 0.0001524987530410507, 'learning_rate': 6.930603707469904, 'epoch': 0.09}
+ 9%|▉ | 49/520 [03:34<29:17, 3.73s/it] {'loss': 9.2616, 'grad_norm': 0.00027755941403737495, 'learning_rate': 6.92621421064177, 'epoch': 0.09}
+ 10%|▉ | 50/520 [03:38<29:20, 3.75s/it] {'loss': 9.2145, 'grad_norm': 0.00014558200513970865, 'learning_rate': 6.9216915912175665, 'epoch': 0.1}
+ 10%|▉ | 51/520 [03:41<29:39, 3.79s/it] {'loss': 8.9393, 'grad_norm': 0.00018451141527604282, 'learning_rate': 6.917036024919767, 'epoch': 0.1}
+ 10%|█ | 52/520 [03:45<29:37, 3.80s/it] {'loss': 9.7065, 'grad_norm': 7.638605502913839e-05, 'learning_rate': 6.912247692636383, 'epoch': 0.1}
+ 10%|█ | 53/520 [03:49<29:22, 3.77s/it] {'loss': 9.5572, 'grad_norm': 7.572490647915465e-05, 'learning_rate': 6.907326780413931, 'epoch': 0.1}
+ 10%|█ | 54/520 [03:53<29:13, 3.76s/it] {'loss': 8.8731, 'grad_norm': 8.906754188318361e-05, 'learning_rate': 6.90227347945021, 'epoch': 0.1}
+ 11%|█ | 55/520 [03:56<29:07, 3.76s/it] {'loss': 9.2706, 'grad_norm': 6.45426695054534e-05, 'learning_rate': 6.897087986086868, 'epoch': 0.11}
+ 11%|█ | 56/520 [04:00<28:52, 3.73s/it] {'loss': 9.5868, 'grad_norm': 7.727172895507173e-05, 'learning_rate': 6.891770501801773, 'epoch': 0.11}
+ 11%|█ | 57/520 [04:04<28:40, 3.72s/it] {'loss': 9.1006, 'grad_norm': 7.812772752144854e-05, 'learning_rate': 6.886321233201187, 'epoch': 0.11}
+ 11%|█ | 58/520 [04:08<28:41, 3.73s/it] {'loss': 9.3569, 'grad_norm': 0.00010082685516805685, 'learning_rate': 6.880740392011739, 'epoch': 0.11}
+ 11%|█▏ | 59/520 [04:11<28:35, 3.72s/it] {'loss': 9.8975, 'grad_norm': 0.00011616491624403975, 'learning_rate': 6.875028195072197, 'epoch': 0.11}
+ 12%|█▏ | 60/520 [04:15<28:43, 3.75s/it] {'loss': 9.6032, 'grad_norm': 7.877829511549602e-05, 'learning_rate': 6.8691848643250415, 'epoch': 0.12}
+ 12%|█▏ | 61/520 [04:19<28:32, 3.73s/it] {'loss': 10.4911, 'grad_norm': 7.734012193695716e-05, 'learning_rate': 6.863210626807849, 'epoch': 0.12}
+ 12%|█▏ | 62/520 [04:23<28:31, 3.74s/it] {'loss': 9.5968, 'grad_norm': 7.056142866710801e-05, 'learning_rate': 6.857105714644457, 'epoch': 0.12}
+ 12%|█▏ | 63/520 [04:26<28:23, 3.73s/it] {'loss': 9.1825, 'grad_norm': 7.105303728798033e-05, 'learning_rate': 6.850870365035963, 'epoch': 0.12}
+ 12%|█▏ | 64/520 [04:30<28:12, 3.71s/it] {'loss': 9.1606, 'grad_norm': 8.050953640529716e-05, 'learning_rate': 6.844504820251493, 'epoch': 0.12}
+ 12%|█▎ | 65/520 [04:34<28:14, 3.72s/it] {'loss': 9.6377, 'grad_norm': 9.443178651561623e-05, 'learning_rate': 6.838009327618794, 'epoch': 0.12}
+ 13%|█▎ | 66/520 [04:37<28:03, 3.71s/it] {'loss': 9.6127, 'grad_norm': 6.610491702598267e-05, 'learning_rate': 6.831384139514629, 'epoch': 0.13}
+ 13%|█▎ | 67/520 [04:41<28:01, 3.71s/it] {'loss': 9.1244, 'grad_norm': 0.00011059482548391516, 'learning_rate': 6.82462951335496, 'epoch': 0.13}
+ 13%|█▎ | 68/520 [04:45<27:58, 3.71s/it] {'loss': 8.8021, 'grad_norm': 0.00011270162910238129, 'learning_rate': 6.817745711584961, 'epoch': 0.13}
+ 13%|█▎ | 69/520 [04:48<27:44, 3.69s/it] {'loss': 8.9202, 'grad_norm': 0.00010516630169798864, 'learning_rate': 6.8107330016688055, 'epoch': 0.13}
+ 13%|█▎ | 70/520 [04:52<27:42, 3.69s/it] {'loss': 9.1559, 'grad_norm': 7.612578961417374e-05, 'learning_rate': 6.803591656079287, 'epoch': 0.13}
+ 14%|█▎ | 71/520 [04:56<27:55, 3.73s/it] {'loss': 8.8567, 'grad_norm': 0.0001375949064505729, 'learning_rate': 6.796321952287222, 'epoch': 0.14}
+ 14%|█▍ | 72/520 [05:00<28:14, 3.78s/it] {'loss': 9.3365, 'grad_norm': 0.0001412871570688389, 'learning_rate': 6.788924172750679, 'epoch': 0.14}
+ 14%|█▍ | 73/520 [05:04<28:24, 3.81s/it] {'loss': 8.8239, 'grad_norm': 0.00035596101146531813, 'learning_rate': 6.781398604903997, 'epoch': 0.14}
+ 14%|█▍ | 74/520 [05:08<28:28, 3.83s/it] {'loss': 9.4793, 'grad_norm': 8.251178708173628e-05, 'learning_rate': 6.773745541146619, 'epoch': 0.14}
+ 14%|█▍ | 75/520 [05:11<28:31, 3.85s/it] {'loss': 8.4901, 'grad_norm': 0.00011192085093502586, 'learning_rate': 6.765965278831732, 'epoch': 0.14}
+ 15%|█▍ | 76/520 [05:15<28:32, 3.86s/it] {'loss': 10.38, 'grad_norm': 0.00011461888213720236, 'learning_rate': 6.758058120254715, 'epoch': 0.15}
+ 15%|█▍ | 77/520 [05:19<28:26, 3.85s/it] {'loss': 9.0759, 'grad_norm': 8.445046676746625e-05, 'learning_rate': 6.750024372641388, 'epoch': 0.15}
+ 15%|█▌ | 78/520 [05:23<28:26, 3.86s/it] {'loss': 8.7731, 'grad_norm': 6.267809590483407e-05, 'learning_rate': 6.7418643481360805, 'epoch': 0.15}
+ 15%|█▌ | 79/520 [05:27<28:26, 3.87s/it] {'loss': 9.1462, 'grad_norm': 7.45534039296982e-05, 'learning_rate': 6.733578363789504, 'epoch': 0.15}
+ 15%|█▌ | 80/520 [05:31<28:15, 3.85s/it] {'loss': 10.9402, 'grad_norm': 7.482699582158229e-05, 'learning_rate': 6.725166741546427, 'epoch': 0.15}
+ 16%|█▌ | 81/520 [05:34<27:49, 3.80s/it] {'loss': 10.0458, 'grad_norm': 7.78423553954638e-05, 'learning_rate': 6.716629808233172, 'epoch': 0.16}
+ 16%|█▌ | 82/520 [05:38<27:33, 3.78s/it] {'loss': 9.2163, 'grad_norm': 5.2608757324038206e-05, 'learning_rate': 6.7079678955449165, 'epoch': 0.16}
+ 16%|█▌ | 83/520 [05:42<27:26, 3.77s/it] {'loss': 9.5158, 'grad_norm': 5.530590406736438e-05, 'learning_rate': 6.699181340032801, 'epoch': 0.16}
+ 16%|█▌ | 84/520 [05:46<27:19, 3.76s/it] {'loss': 9.4245, 'grad_norm': 5.7995298575099435e-05, 'learning_rate': 6.690270483090856, 'epoch': 0.16}
+ 16%|█▋ | 85/520 [05:49<27:01, 3.73s/it] {'loss': 9.2761, 'grad_norm': 5.656174798381481e-05, 'learning_rate': 6.681235670942739, 'epoch': 0.16}
+ 17%|█▋ | 86/520 [05:53<26:56, 3.72s/it] {'loss': 9.8838, 'grad_norm': 6.746741213571622e-05, 'learning_rate': 6.672077254628276, 'epoch': 0.17}
+ 17%|█▋ | 87/520 [05:57<26:48, 3.72s/it] {'loss': 10.4669, 'grad_norm': 6.630083975398004e-05, 'learning_rate': 6.662795589989829, 'epoch': 0.17}
+ 17%|█▋ | 88/520 [06:00<26:49, 3.72s/it] {'loss': 10.8945, 'grad_norm': 0.0001191802264182558, 'learning_rate': 6.653391037658467, 'epoch': 0.17}
+ 17%|█▋ | 89/520 [06:04<26:39, 3.71s/it] {'loss': 9.3672, 'grad_norm': 5.413611205772745e-05, 'learning_rate': 6.643863963039955, 'epoch': 0.17}
+ 17%|█▋ | 90/520 [06:08<26:36, 3.71s/it] {'loss': 9.0588, 'grad_norm': 5.1246510100402446e-05, 'learning_rate': 6.634214736300553, 'epoch': 0.17}
+ 18%|█▊ | 91/520 [06:12<26:31, 3.71s/it] {'loss': 9.4285, 'grad_norm': 5.939054211928632e-05, 'learning_rate': 6.62444373235264, 'epoch': 0.17}
+ 18%|█▊ | 92/520 [06:15<26:20, 3.69s/it] {'loss': 9.0591, 'grad_norm': 6.343483065656033e-05, 'learning_rate': 6.614551330840141, 'epoch': 0.18}
+ 18%|█▊ | 93/520 [06:19<26:16, 3.69s/it] {'loss': 9.2147, 'grad_norm': 6.045118670212567e-05, 'learning_rate': 6.604537916123776, 'epoch': 0.18}
+ 18%|█▊ | 94/520 [06:23<26:18, 3.71s/it] {'loss': 9.6835, 'grad_norm': 5.757846986114629e-05, 'learning_rate': 6.594403877266134, 'epoch': 0.18}
+ 18%|█▊ | 95/520 [06:26<26:16, 3.71s/it] {'loss': 9.3522, 'grad_norm': 5.986295134483592e-05, 'learning_rate': 6.584149608016548, 'epoch': 0.18}
+ 18%|█▊ | 96/520 [06:30<26:09, 3.70s/it] {'loss': 8.7995, 'grad_norm': 6.085702346137907e-05, 'learning_rate': 6.5737755067957995, 'epoch': 0.18}
+ 19%|█▊ | 97/520 [06:34<26:06, 3.70s/it] {'loss': 9.3717, 'grad_norm': 8.295635246156252e-05, 'learning_rate': 6.5632819766806385, 'epoch': 0.19}
+ 19%|█▉ | 98/520 [06:38<26:11, 3.72s/it] {'loss': 8.685, 'grad_norm': 7.136890294883401e-05, 'learning_rate': 6.552669425388119, 'epoch': 0.19}
+ 19%|█▉ | 99/520 [06:41<26:21, 3.76s/it] {'loss': 9.5191, 'grad_norm': 4.9958883816839645e-05, 'learning_rate': 6.541938265259763, 'epoch': 0.19}
+ 19%|█▉ | 100/520 [06:45<26:26, 3.78s/it] {'loss': 10.2655, 'grad_norm': 5.3479192880138904e-05, 'learning_rate': 6.531088913245536, 'epoch': 0.19}
+ 19%|█▉ | 101/520 [06:49<26:25, 3.79s/it] {'loss': 9.1455, 'grad_norm': 4.600162468644877e-05, 'learning_rate': 6.520121790887646, 'epoch': 0.19}
+ 20%|█▉ | 102/520 [06:53<26:35, 3.82s/it] {'loss': 9.2265, 'grad_norm': 6.83253491801869e-05, 'learning_rate': 6.509037324304166, 'epoch': 0.2}
+ 20%|█▉ | 103/520 [06:57<26:34, 3.82s/it] {'loss': 8.4732, 'grad_norm': 8.622149697049075e-05, 'learning_rate': 6.497835944172481, 'epoch': 0.2}
+ 20%|██ | 104/520 [07:01<26:41, 3.85s/it] {'loss': 9.4862, 'grad_norm': 4.701766492139312e-05, 'learning_rate': 6.486518085712545, 'epoch': 0.2}
+ 20%|██ | 105/520 [07:04<26:33, 3.84s/it] {'loss': 9.1605, 'grad_norm': 6.493193762357765e-05, 'learning_rate': 6.475084188669982, 'epoch': 0.2}
+ 20%|██ | 106/520 [07:08<26:23, 3.83s/it] {'loss': 10.0703, 'grad_norm': 6.355429457300449e-05, 'learning_rate': 6.463534697298995, 'epoch': 0.2}
+ 21%|██ | 107/520 [07:12<26:14, 3.81s/it] {'loss': 10.1149, 'grad_norm': 6.820020436908107e-05, 'learning_rate': 6.4518700603451, 'epoch': 0.21}
+ 21%|██ | 108/520 [07:16<25:58, 3.78s/it] {'loss': 9.2047, 'grad_norm': 5.5173189438083515e-05, 'learning_rate': 6.4400907310277, 'epoch': 0.21}
+ 21%|██ | 109/520 [07:19<25:41, 3.75s/it] {'loss': 10.3127, 'grad_norm': 4.992658597878106e-05, 'learning_rate': 6.42819716702247, 'epoch': 0.21}
+ 21%|██ | 110/520 [07:23<25:38, 3.75s/it] {'loss': 9.7968, 'grad_norm': 5.326312873358663e-05, 'learning_rate': 6.416189830443571, 'epoch': 0.21}
+ 21%|██▏ | 111/520 [07:27<25:46, 3.78s/it] {'loss': 9.8, 'grad_norm': 4.420773776939271e-05, 'learning_rate': 6.404069187825706, 'epoch': 0.21}
+ 22%|██▏ | 112/520 [07:31<25:39, 3.77s/it] {'loss': 9.7093, 'grad_norm': 5.770645284303059e-05, 'learning_rate': 6.391835710105982, 'epoch': 0.22}
+ 22%|██▏ | 113/520 [07:35<25:40, 3.78s/it] {'loss': 8.8138, 'grad_norm': 5.780524586965233e-05, 'learning_rate': 6.379489872605617, 'epoch': 0.22}
+ 22%|██▏ | 114/520 [07:38<25:31, 3.77s/it] {'loss': 9.8148, 'grad_norm': 5.31607478984108e-05, 'learning_rate': 6.367032155011472, 'epoch': 0.22}
+ 22%|██▏ | 115/520 [07:42<25:19, 3.75s/it] {'loss': 10.2283, 'grad_norm': 6.353656868010959e-05, 'learning_rate': 6.354463041357411, 'epoch': 0.22}
+ 22%|██▏ | 116/520 [07:46<25:07, 3.73s/it] {'loss': 9.62, 'grad_norm': 8.53840019803409e-05, 'learning_rate': 6.341783020005499, 'epoch': 0.22}
+ 22%|██▎ | 117/520 [07:49<25:07, 3.74s/it] {'loss': 9.6151, 'grad_norm': 5.5129350784787645e-05, 'learning_rate': 6.328992583627018, 'epoch': 0.23}
+ 23%|██▎ | 118/520 [07:53<25:15, 3.77s/it] {'loss': 8.9842, 'grad_norm': 4.7068007256715334e-05, 'learning_rate': 6.316092229183339, 'epoch': 0.23}
+ 23%|██▎ | 119/520 [07:57<24:58, 3.74s/it] {'loss': 8.6783, 'grad_norm': 5.86023892126974e-05, 'learning_rate': 6.303082457906598, 'epoch': 0.23}
+ 23%|██▎ | 120/520 [08:01<24:54, 3.74s/it] {'loss': 9.1389, 'grad_norm': 5.141643986770886e-05, 'learning_rate': 6.289963775280229, 'epoch': 0.23}
+ 23%|██▎ | 121/520 [08:05<25:09, 3.78s/it] {'loss': 8.8304, 'grad_norm': 4.569995963604679e-05, 'learning_rate': 6.276736691019323, 'epoch': 0.23}
+ 23%|██▎ | 122/520 [08:08<25:14, 3.81s/it]
{'loss': 8.7692, 'grad_norm': 6.188138246726733e-05, 'learning_rate': 6.263401719050824, 'epoch': 0.23} + 23%|██▎ | 122/520 [08:08<25:14, 3.81s/it] 24%|██▎ | 123/520 [08:12<25:08, 3.80s/it] {'loss': 10.2466, 'grad_norm': 6.723156578692567e-05, 'learning_rate': 6.249959377493558, 'epoch': 0.24} + 24%|██▎ | 123/520 [08:12<25:08, 3.80s/it] 24%|██▍ | 124/520 [08:16<24:48, 3.76s/it] {'loss': 9.7881, 'grad_norm': 4.5683123981448795e-05, 'learning_rate': 6.2364101886381045, 'epoch': 0.24} + 24%|██▍ | 124/520 [08:16<24:48, 3.76s/it] 24%|██▍ | 125/520 [08:20<24:35, 3.74s/it] {'loss': 9.2552, 'grad_norm': 4.100380270790033e-05, 'learning_rate': 6.222754678926502, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:20<24:35, 3.74s/it] 24%|██▍ | 126/520 [08:24<25:45, 3.92s/it] {'loss': 9.3569, 'grad_norm': 8.301026155504885e-05, 'learning_rate': 6.208993378931797, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:24<25:45, 3.92s/it] 24%|██▍ | 127/520 [08:28<25:12, 3.85s/it] {'loss': 9.5491, 'grad_norm': 7.232312540641143e-05, 'learning_rate': 6.19512682333742, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:28<25:12, 3.85s/it] 25%|██▍ | 128/520 [08:31<24:53, 3.81s/it] {'loss': 9.3367, 'grad_norm': 4.170033762209966e-05, 'learning_rate': 6.181155550916422, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:31<24:53, 3.81s/it] 25%|██▍ | 129/520 [08:35<24:32, 3.77s/it] {'loss': 8.6167, 'grad_norm': 5.963995253940553e-05, 'learning_rate': 6.1670801045105375, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:35<24:32, 3.77s/it] 25%|██▌ | 130/520 [08:39<24:19, 3.74s/it] {'loss': 9.6374, 'grad_norm': 7.135819478073949e-05, 'learning_rate': 6.152901031009086, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:39<24:19, 3.74s/it] 25%|██▌ | 131/520 [08:42<24:20, 3.75s/it] {'loss': 9.9698, 'grad_norm': 7.919538423174664e-05, 'learning_rate': 6.138618881327729, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:42<24:20, 3.75s/it] 25%|██▌ | 132/520 [08:46<24:13, 3.75s/it] {'loss': 9.5264, 'grad_norm': 4.073871084708311e-05, 'learning_rate': 6.12423421038707, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:46<24:13, 3.75s/it] 26%|██▌ | 133/520 [08:50<23:59, 3.72s/it] {'loss': 9.3922, 'grad_norm': 5.9855830720230515e-05, 'learning_rate': 6.109747577091079, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:50<23:59, 3.72s/it] 26%|██▌ | 134/520 [08:54<23:57, 3.72s/it] {'loss': 9.308, 'grad_norm': 5.091670834823981e-05, 'learning_rate': 6.095159544305393, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:54<23:57, 3.72s/it] 26%|██▌ | 135/520 [08:57<23:53, 3.72s/it] {'loss': 9.7726, 'grad_norm': 4.1205822499845585e-05, 'learning_rate': 6.080470678835434, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:57<23:53, 3.72s/it] 26%|██▌ | 136/520 [09:01<23:49, 3.72s/it] {'loss': 9.0546, 'grad_norm': 5.8630823766908954e-05, 'learning_rate': 6.065681551404392, 'epoch': 0.26} + 26%|██▌ | 136/520 [09:01<23:49, 3.72s/it] 26%|██▋ | 137/520 [09:05<23:44, 3.72s/it] {'loss': 9.3551, 'grad_norm': 3.972864643281657e-05, 'learning_rate': 6.05079273663105, 'epoch': 0.26} + 26%|██▋ | 137/520 [09:05<23:44, 3.72s/it] 27%|██▋ | 138/520 [09:09<23:54, 3.75s/it] {'loss': 9.0311, 'grad_norm': 5.3463007646814584e-05, 'learning_rate': 6.035804813007454, 'epoch': 0.27} + 27%|██▋ | 138/520 [09:09<23:54, 3.75s/it] 27%|██▋ | 139/520 [09:13<24:10, 3.81s/it] {'loss': 9.4713, 'grad_norm': 9.593165294211588e-05, 'learning_rate': 6.020718362876443, 'epoch': 0.27} + 27%|██▋ | 139/520 [09:13<24:10, 3.81s/it] 27%|██▋ | 140/520 [09:16<24:23, 3.85s/it] {'loss': 10.1386, 'grad_norm': 6.37627438492302e-05, 'learning_rate': 6.005533972409014, 'epoch': 0.27} + 27%|██▋ | 140/520 
[09:16<24:23, 3.85s/it] 27%|██▋ | 141/520 [09:20<24:23, 3.86s/it] {'loss': 9.4408, 'grad_norm': 4.6235416935733395e-05, 'learning_rate': 5.990252231581556, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:20<24:23, 3.86s/it] 27%|██▋ | 142/520 [09:24<24:28, 3.88s/it] {'loss': 10.115, 'grad_norm': 6.142842159010963e-05, 'learning_rate': 5.974873734152916, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:24<24:28, 3.88s/it] 28%|██▊ | 143/520 [09:28<24:20, 3.87s/it] {'loss': 9.4402, 'grad_norm': 3.3411675339301635e-05, 'learning_rate': 5.959399077641342, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:28<24:20, 3.87s/it] 28%|██▊ | 144/520 [09:32<24:02, 3.84s/it] {'loss': 8.7307, 'grad_norm': 4.6725277561179163e-05, 'learning_rate': 5.943828863301254, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:32<24:02, 3.84s/it] 28%|██▊ | 145/520 [09:36<24:14, 3.88s/it] {'loss': 8.9677, 'grad_norm': 3.853401633592919e-05, 'learning_rate': 5.928163696099896, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:36<24:14, 3.88s/it] 28%|██▊ | 146/520 [09:40<23:59, 3.85s/it] {'loss': 10.3192, 'grad_norm': 5.668064283412769e-05, 'learning_rate': 5.9124041846938145, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:40<23:59, 3.85s/it] 28%|██▊ | 147/520 [09:44<23:55, 3.85s/it] {'loss': 8.8034, 'grad_norm': 6.310574902093333e-05, 'learning_rate': 5.896550941405227, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:44<23:55, 3.85s/it] 28%|██▊ | 148/520 [09:47<23:35, 3.80s/it] {'loss': 9.0561, 'grad_norm': 5.00908392189356e-05, 'learning_rate': 5.880604582198218, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:47<23:35, 3.80s/it] 29%|██▊ | 149/520 [09:51<23:22, 3.78s/it] {'loss': 9.135, 'grad_norm': 7.492580324170873e-05, 'learning_rate': 5.864565726654812, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:51<23:22, 3.78s/it] 29%|██▉ | 150/520 [09:55<23:17, 3.78s/it] {'loss': 9.4872, 'grad_norm': 3.582001400202356e-05, 'learning_rate': 5.848434997950895, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:55<23:17, 3.78s/it] 29%|██▉ | 151/520 [09:58<23:05, 3.76s/it] {'loss': 8.9165, 'grad_norm': 5.045316801385499e-05, 'learning_rate': 5.832213022832014, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:58<23:05, 3.76s/it] 29%|██▉ | 152/520 [10:02<22:57, 3.74s/it] {'loss': 9.0826, 'grad_norm': 5.197578469281455e-05, 'learning_rate': 5.815900431589008, 'epoch': 0.29} + 29%|██▉ | 152/520 [10:02<22:57, 3.74s/it] 29%|██▉ | 153/520 [10:06<22:51, 3.74s/it] {'loss': 8.9321, 'grad_norm': 7.146623765785768e-05, 'learning_rate': 5.799497858033532, 'epoch': 0.29} + 29%|██▉ | 153/520 [10:06<22:51, 3.74s/it] 30%|██▉ | 154/520 [10:09<22:36, 3.71s/it] {'loss': 9.3451, 'grad_norm': 5.062599601262751e-05, 'learning_rate': 5.783005939473425, 'epoch': 0.3} + 30%|██▉ | 154/520 [10:09<22:36, 3.71s/it] 30%|██▉ | 155/520 [10:13<22:40, 3.73s/it] {'loss': 9.4445, 'grad_norm': 4.2104396722825607e-05, 'learning_rate': 5.766425316687947, 'epoch': 0.3} + 30%|██▉ | 155/520 [10:13<22:40, 3.73s/it] 30%|███ | 156/520 [10:17<22:35, 3.72s/it] {'loss': 9.5428, 'grad_norm': 3.686212343810811e-05, 'learning_rate': 5.749756633902887, 'epoch': 0.3} + 30%|███ | 156/520 [10:17<22:35, 3.72s/it] 30%|███ | 157/520 [10:21<22:37, 3.74s/it] {'loss': 10.3867, 'grad_norm': 5.659964488739056e-05, 'learning_rate': 5.7330005387655305, 'epoch': 0.3} + 30%|███ | 157/520 [10:21<22:37, 3.74s/it] 30%|███ | 158/520 [10:24<22:31, 3.73s/it] {'loss': 9.0752, 'grad_norm': 4.9334707958048886e-05, 'learning_rate': 5.71615768231949, 'epoch': 0.3} + 30%|███ | 158/520 [10:24<22:31, 3.73s/it] 31%|███ | 159/520 [10:28<22:26, 3.73s/it] {'loss': 9.0082, 'grad_norm': 4.528774188247477e-05, 
'learning_rate': 5.699228718979415, 'epoch': 0.31} + 31%|███ | 159/520 [10:28<22:26, 3.73s/it] 31%|███ | 160/520 [10:32<22:25, 3.74s/it] {'loss': 9.0583, 'grad_norm': 4.986710191079481e-05, 'learning_rate': 5.682214306505568, 'epoch': 0.31} + 31%|███ | 160/520 [10:32<22:25, 3.74s/it] 31%|███ | 161/520 [10:36<22:18, 3.73s/it] {'loss': 9.2146, 'grad_norm': 3.924730169648407e-05, 'learning_rate': 5.665115105978258, 'epoch': 0.31} + 31%|███ | 161/520 [10:36<22:18, 3.73s/it] 31%|███ | 162/520 [10:39<22:10, 3.72s/it] {'loss': 10.2344, 'grad_norm': 5.558737271843522e-05, 'learning_rate': 5.647931781772166, 'epoch': 0.31} + 31%|███ | 162/520 [10:39<22:10, 3.72s/it] 31%|███▏ | 163/520 [10:43<22:05, 3.71s/it] {'loss': 8.8705, 'grad_norm': 6.873497620025783e-05, 'learning_rate': 5.630665001530522, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:43<22:05, 3.71s/it] 32%|███▏ | 164/520 [10:47<22:06, 3.73s/it] {'loss': 8.697, 'grad_norm': 6.288383428861675e-05, 'learning_rate': 5.613315436139171, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:47<22:06, 3.73s/it] 32%|███▏ | 165/520 [10:50<21:56, 3.71s/it] {'loss': 9.0617, 'grad_norm': 3.2795719257426276e-05, 'learning_rate': 5.595883759700501, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:50<21:56, 3.71s/it] 32%|███▏ | 166/520 [10:54<21:59, 3.73s/it] {'loss': 8.9409, 'grad_norm': 4.2648464221445385e-05, 'learning_rate': 5.578370649507255, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:54<21:59, 3.73s/it] 32%|███▏ | 167/520 [10:58<21:54, 3.72s/it] {'loss': 9.4601, 'grad_norm': 3.8781862181813635e-05, 'learning_rate': 5.560776786016216, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:58<21:54, 3.72s/it] 32%|███▏ | 168/520 [11:02<21:52, 3.73s/it] {'loss': 8.9696, 'grad_norm': 3.367001930181192e-05, 'learning_rate': 5.5431028528217645, 'epoch': 0.32} + 32%|███▏ | 168/520 [11:02<21:52, 3.73s/it] 32%|███▎ | 169/520 [11:05<21:48, 3.73s/it] {'loss': 9.2408, 'grad_norm': 3.820935684560854e-05, 'learning_rate': 5.525349536629321, 'epoch': 0.33} + 32%|███▎ | 169/520 [11:05<21:48, 3.73s/it] 33%|███▎ | 170/520 [11:09<21:46, 3.73s/it] {'loss': 9.7663, 'grad_norm': 4.376748243697797e-05, 'learning_rate': 5.507517527228661, 'epoch': 0.33} + 33%|███▎ | 170/520 [11:09<21:46, 3.73s/it] 33%|███▎ | 171/520 [11:13<21:57, 3.77s/it] {'loss': 8.7727, 'grad_norm': 4.160216053616288e-05, 'learning_rate': 5.489607517467124, 'epoch': 0.33} + 33%|███▎ | 171/520 [11:13<21:57, 3.77s/it] 33%|███▎ | 172/520 [11:17<22:10, 3.82s/it] {'loss': 9.1891, 'grad_norm': 3.464440557524158e-05, 'learning_rate': 5.471620203222677, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:17<22:10, 3.82s/it] 33%|███▎ | 173/520 [11:21<22:08, 3.83s/it] {'loss': 8.7624, 'grad_norm': 4.1417628607639296e-05, 'learning_rate': 5.453556283376894, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:21<22:08, 3.83s/it] 33%|███▎ | 174/520 [11:24<21:49, 3.78s/it] {'loss': 9.3894, 'grad_norm': 3.351222700640696e-05, 'learning_rate': 5.435416459787787, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:25<21:49, 3.78s/it] 34%|███▎ | 175/520 [11:28<21:39, 3.77s/it] {'loss': 8.8584, 'grad_norm': 3.783271696088707e-05, 'learning_rate': 5.41720143726255, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:28<21:39, 3.77s/it] 34%|███▍ | 176/520 [11:32<21:34, 3.76s/it] {'loss': 10.3671, 'grad_norm': 3.645701806346784e-05, 'learning_rate': 5.398911923530158, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:32<21:34, 3.76s/it] 34%|███▍ | 177/520 [11:36<21:45, 3.80s/it] {'loss': 9.8109, 'grad_norm': 6.727828714875759e-05, 'learning_rate': 5.380548629213884, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:36<21:45, 3.80s/it] 
34%|███▍ | 178/520 [11:40<21:44, 3.81s/it] {'loss': 9.2634, 'grad_norm': 3.811009318320823e-05, 'learning_rate': 5.362112267803678, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:40<21:44, 3.81s/it] 34%|███▍ | 179/520 [11:44<21:48, 3.84s/it] {'loss': 9.1376, 'grad_norm': 3.447455521086129e-05, 'learning_rate': 5.3436035556284525, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:44<21:48, 3.84s/it] 35%|███▍ | 180/520 [11:47<21:47, 3.84s/it] {'loss': 9.3075, 'grad_norm': 3.4512919131261536e-05, 'learning_rate': 5.325023211828243, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:47<21:47, 3.84s/it] 35%|███▍ | 181/520 [11:51<21:49, 3.86s/it] {'loss': 8.8886, 'grad_norm': 2.685701208557617e-05, 'learning_rate': 5.306371958326273, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:51<21:49, 3.86s/it] 35%|███▌ | 182/520 [11:55<21:48, 3.87s/it] {'loss': 9.2882, 'grad_norm': 2.872333090878294e-05, 'learning_rate': 5.2876505198009, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:55<21:48, 3.87s/it] 35%|███▌ | 183/520 [11:59<21:44, 3.87s/it] {'loss': 8.9208, 'grad_norm': 3.513342075953011e-05, 'learning_rate': 5.268859623657458, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:59<21:44, 3.87s/it] 35%|███▌ | 184/520 [12:03<21:38, 3.86s/it] {'loss': 8.7199, 'grad_norm': 5.240395658132808e-05, 'learning_rate': 5.25, 'epoch': 0.35} + 35%|███▌ | 184/520 [12:03<21:38, 3.86s/it] 36%|███▌ | 185/520 [12:07<21:18, 3.82s/it] {'loss': 9.772, 'grad_norm': 4.9214388803513094e-05, 'learning_rate': 5.231072381602926, 'epoch': 0.36} + 36%|███▌ | 185/520 [12:07<21:18, 3.82s/it] 36%|███▌ | 186/520 [12:10<20:58, 3.77s/it] {'loss': 8.9944, 'grad_norm': 2.6321029140541377e-05, 'learning_rate': 5.212077503882513, 'epoch': 0.36} + 36%|███▌ | 186/520 [12:10<20:58, 3.77s/it] 36%|███▌ | 187/520 [12:14<20:41, 3.73s/it] {'loss': 9.4138, 'grad_norm': 2.33094972238283e-05, 'learning_rate': 5.193016104868339, 'epoch': 0.36} + 36%|███▌ | 187/520 [12:14<20:41, 3.73s/it] 36%|███▌ | 188/520 [12:18<20:30, 3.71s/it] {'loss': 8.9587, 'grad_norm': 2.8010725499089642e-05, 'learning_rate': 5.173888925174614, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:18<20:30, 3.71s/it] 36%|███▋ | 189/520 [12:21<20:25, 3.70s/it] {'loss': 9.4431, 'grad_norm': 2.951260476071695e-05, 'learning_rate': 5.154696707971395, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:21<20:25, 3.70s/it] 37%|███▋ | 190/520 [12:25<20:18, 3.69s/it] {'loss': 9.0189, 'grad_norm': 2.7137436151633257e-05, 'learning_rate': 5.135440198955717, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:25<20:18, 3.69s/it] 37%|███▋ | 191/520 [12:29<20:12, 3.69s/it] {'loss': 9.1903, 'grad_norm': 2.2897569057331887e-05, 'learning_rate': 5.116120146322619, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:29<20:12, 3.69s/it] 37%|███▋ | 192/520 [12:32<20:17, 3.71s/it] {'loss': 9.5738, 'grad_norm': 2.497565798074892e-05, 'learning_rate': 5.096737300736071, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:32<20:17, 3.71s/it] 37%|███▋ | 193/520 [12:36<20:09, 3.70s/it] {'loss': 10.0051, 'grad_norm': 3.477572017828785e-05, 'learning_rate': 5.077292415299809, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:36<20:09, 3.70s/it] 37%|███▋ | 194/520 [12:40<20:03, 3.69s/it] {'loss': 9.2565, 'grad_norm': 4.8144414130376684e-05, 'learning_rate': 5.057786245528073, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:40<20:03, 3.69s/it] 38%|███▊ | 195/520 [12:43<19:57, 3.69s/it] {'loss': 9.0097, 'grad_norm': 3.240716443427712e-05, 'learning_rate': 5.038219549316257, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:43<19:57, 3.69s/it] 38%|███▊ | 196/520 [12:47<19:49, 3.67s/it] {'loss': 9.2617, 'grad_norm': 2.2334704614810546e-05, 
'learning_rate': 5.018593086911453, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:47<19:49, 3.67s/it] 38%|███▊ | 197/520 [12:51<19:48, 3.68s/it] {'loss': 8.8779, 'grad_norm': 3.3885590467898776e-05, 'learning_rate': 4.998907620882919, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:51<19:48, 3.68s/it] 38%|███▊ | 198/520 [12:54<19:43, 3.68s/it] {'loss': 9.4448, 'grad_norm': 2.324005625896572e-05, 'learning_rate': 4.979163916092448, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:54<19:43, 3.68s/it] 38%|███▊ | 199/520 [12:58<19:44, 3.69s/it] {'loss': 9.1169, 'grad_norm': 2.975363649974282e-05, 'learning_rate': 4.959362739664648, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:58<19:44, 3.69s/it] 38%|███▊ | 200/520 [13:02<19:44, 3.70s/it] {'loss': 9.629, 'grad_norm': 4.146835876826044e-05, 'learning_rate': 4.9395048609571415, 'epoch': 0.38} + 38%|███▊ | 200/520 [13:02<19:44, 3.70s/it] 39%|███▊ | 201/520 [13:06<19:42, 3.71s/it] {'loss': 9.4437, 'grad_norm': 4.2355432794255394e-05, 'learning_rate': 4.919591051530663, 'epoch': 0.39} + 39%|███▊ | 201/520 [13:06<19:42, 3.71s/it] 39%|███▉ | 202/520 [13:09<19:33, 3.69s/it] {'loss': 9.2026, 'grad_norm': 3.0263196816948032e-05, 'learning_rate': 4.899622085119093, 'epoch': 0.39} + 39%|███▉ | 202/520 [13:09<19:33, 3.69s/it] 39%|███▉ | 203/520 [13:13<19:32, 3.70s/it] {'loss': 9.0329, 'grad_norm': 3.003574203708475e-05, 'learning_rate': 4.879598737599388, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:13<19:32, 3.70s/it] 39%|███▉ | 204/520 [13:17<19:30, 3.70s/it] {'loss': 9.5886, 'grad_norm': 3.161225295007144e-05, 'learning_rate': 4.859521786961432, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:17<19:30, 3.70s/it] 39%|███▉ | 205/520 [13:20<19:27, 3.71s/it] {'loss': 9.9077, 'grad_norm': 3.7493470489042665e-05, 'learning_rate': 4.8393920132778145, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:20<19:27, 3.71s/it] 40%|███▉ | 206/520 [13:24<19:28, 3.72s/it] {'loss': 9.6156, 'grad_norm': 3.160503695926623e-05, 'learning_rate': 4.8192101986735185, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:24<19:28, 3.72s/it] 40%|███▉ | 207/520 [13:28<19:26, 3.73s/it] {'loss': 9.8993, 'grad_norm': 4.543551464464164e-05, 'learning_rate': 4.798977127295533, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:28<19:26, 3.73s/it] 40%|████ | 208/520 [13:32<19:17, 3.71s/it] {'loss': 9.1103, 'grad_norm': 4.155350159638073e-05, 'learning_rate': 4.778693585282383, 'epoch': 0.4} + 40%|████ | 208/520 [13:32<19:17, 3.71s/it] 40%|████ | 209/520 [13:35<19:30, 3.76s/it] {'loss': 9.3459, 'grad_norm': 2.6200912774440307e-05, 'learning_rate': 4.758360360733587, 'epoch': 0.4} + 40%|████ | 209/520 [13:35<19:30, 3.76s/it] 40%|████ | 210/520 [13:39<19:16, 3.73s/it] {'loss': 9.1708, 'grad_norm': 4.225589519641556e-05, 'learning_rate': 4.737978243679035, 'epoch': 0.4} + 40%|████ | 210/520 [13:39<19:16, 3.73s/it] 41%|████ | 211/520 [13:43<19:15, 3.74s/it] {'loss': 9.2164, 'grad_norm': 2.1000544902961036e-05, 'learning_rate': 4.717548026048295, 'epoch': 0.41} + 41%|████ | 211/520 [13:43<19:15, 3.74s/it] 41%|████ | 212/520 [13:47<19:22, 3.77s/it] {'loss': 8.6786, 'grad_norm': 3.724743039426759e-05, 'learning_rate': 4.697070501639841, 'epoch': 0.41} + 41%|████ | 212/520 [13:47<19:22, 3.77s/it] 41%|████ | 213/520 [13:51<19:32, 3.82s/it] {'loss': 9.6448, 'grad_norm': 3.623895482233545e-05, 'learning_rate': 4.676546466090208, 'epoch': 0.41} + 41%|████ | 213/520 [13:51<19:32, 3.82s/it] 41%|████ | 214/520 [13:55<19:35, 3.84s/it] {'loss': 9.2801, 'grad_norm': 2.9552711943207846e-05, 'learning_rate': 4.655976716843085, 'epoch': 0.41} + 41%|████ | 214/520 [13:55<19:35, 
3.84s/it] 41%|████▏ | 215/520 [13:58<19:31, 3.84s/it] {'loss': 9.6065, 'grad_norm': 4.586816086728483e-05, 'learning_rate': 4.6353620531183255, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:58<19:31, 3.84s/it] 42%|████▏ | 216/520 [14:02<19:33, 3.86s/it] {'loss': 8.9923, 'grad_norm': 4.150763982846101e-05, 'learning_rate': 4.6147032758808955, 'epoch': 0.42} + 42%|████▏ | 216/520 [14:02<19:33, 3.86s/it] 42%|████▏ | 217/520 [14:06<19:34, 3.88s/it] {'loss': 9.0773, 'grad_norm': 1.828003810868974e-05, 'learning_rate': 4.594001187809756, 'epoch': 0.42} + 42%|████▏ | 217/520 [14:06<19:34, 3.88s/it] 42%|████▏ | 218/520 [14:10<19:34, 3.89s/it] {'loss': 9.5241, 'grad_norm': 3.1734217747305956e-05, 'learning_rate': 4.57325659326667, 'epoch': 0.42} + 42%|████▏ | 218/520 [14:10<19:34, 3.89s/it] 42%|████▏ | 219/520 [14:14<19:31, 3.89s/it] {'loss': 8.6299, 'grad_norm': 4.0765508194932574e-05, 'learning_rate': 4.552470298264955, 'epoch': 0.42} + 42%|████▏ | 219/520 [14:14<19:31, 3.89s/it] 42%|████▏ | 220/520 [14:18<19:26, 3.89s/it] {'loss': 9.8843, 'grad_norm': 6.407941217087449e-05, 'learning_rate': 4.531643110438165, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:18<19:26, 3.89s/it] 42%|████▎ | 221/520 [14:22<19:21, 3.88s/it] {'loss': 9.0529, 'grad_norm': 2.9511154897398645e-05, 'learning_rate': 4.510775839008705, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:22<19:21, 3.88s/it] 43%|████▎ | 222/520 [14:26<19:22, 3.90s/it] {'loss': 8.6781, 'grad_norm': 4.233854835184335e-05, 'learning_rate': 4.489869294756396, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:26<19:22, 3.90s/it] 43%|████▎ | 223/520 [14:30<19:09, 3.87s/it] {'loss': 8.7317, 'grad_norm': 4.5253490478272135e-05, 'learning_rate': 4.4689242899869726, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:30<19:09, 3.87s/it] 43%|████▎ | 224/520 [14:33<18:49, 3.81s/it] {'loss': 11.2713, 'grad_norm': 5.481181508794723e-05, 'learning_rate': 4.447941638500518, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:33<18:49, 3.81s/it] 43%|████▎ | 225/520 [14:37<18:32, 3.77s/it] {'loss': 8.9438, 'grad_norm': 3.812140275036906e-05, 'learning_rate': 4.426922155559845, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:37<18:32, 3.77s/it] 43%|████▎ | 226/520 [14:41<18:18, 3.74s/it] {'loss': 9.3331, 'grad_norm': 5.909857608120817e-05, 'learning_rate': 4.405866657858823, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:41<18:18, 3.74s/it] 44%|████▎ | 227/520 [14:44<18:12, 3.73s/it] {'loss': 9.0626, 'grad_norm': 3.2373292015913996e-05, 'learning_rate': 4.384775963490641, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:44<18:12, 3.73s/it] 44%|████▍ | 228/520 [14:48<18:06, 3.72s/it] {'loss': 10.5347, 'grad_norm': 3.6442646339118654e-05, 'learning_rate': 4.363650891916027, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:48<18:06, 3.72s/it] 44%|████▍ | 229/520 [14:52<18:03, 3.72s/it] {'loss': 9.068, 'grad_norm': 3.9465259954909e-05, 'learning_rate': 4.342492263931406, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:52<18:03, 3.72s/it] 44%|████▍ | 230/520 [14:55<18:03, 3.74s/it] {'loss': 9.1195, 'grad_norm': 4.9801784311163236e-05, 'learning_rate': 4.321300901637004, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:55<18:03, 3.74s/it] 44%|████▍ | 231/520 [14:59<17:57, 3.73s/it] {'loss': 9.2614, 'grad_norm': 2.6534386494004888e-05, 'learning_rate': 4.300077628404914, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:59<17:57, 3.73s/it] 45%|████▍ | 232/520 [15:03<17:53, 3.73s/it] {'loss': 10.7332, 'grad_norm': 4.3113978514916716e-05, 'learning_rate': 4.2788232688471, 'epoch': 0.45} + 45%|████▍ | 232/520 [15:03<17:53, 3.73s/it] 45%|████▍ | 233/520 [15:07<17:47, 3.72s/it] 
{'loss': 10.0363, 'grad_norm': 4.8234590061356126e-05, 'learning_rate': 4.25753864878336, 'epoch': 0.45} + 45%|████▍ | 233/520 [15:07<17:47, 3.72s/it] 45%|████▌ | 234/520 [15:10<17:44, 3.72s/it] {'loss': 8.7374, 'grad_norm': 3.7811265984542925e-05, 'learning_rate': 4.236224595209236, 'epoch': 0.45} + 45%|████▌ | 234/520 [15:10<17:44, 3.72s/it] 45%|████▌ | 235/520 [15:14<17:43, 3.73s/it] {'loss': 9.1181, 'grad_norm': 2.5430157086646502e-05, 'learning_rate': 4.214881936263882, 'epoch': 0.45} + 45%|████▌ | 235/520 [15:14<17:43, 3.73s/it] 45%|████▌ | 236/520 [15:18<17:35, 3.72s/it] {'loss': 9.8538, 'grad_norm': 0.00010529013846146379, 'learning_rate': 4.193511501197891, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:18<17:35, 3.72s/it] 46%|████▌ | 237/520 [15:21<17:31, 3.71s/it] {'loss': 9.3445, 'grad_norm': 5.071071593965847e-05, 'learning_rate': 4.172114120341077, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:21<17:31, 3.71s/it] 46%|████▌ | 238/520 [15:25<17:28, 3.72s/it] {'loss': 8.9527, 'grad_norm': 2.5397319245373038e-05, 'learning_rate': 4.150690625070202, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:25<17:28, 3.72s/it] 46%|████▌ | 239/520 [15:29<17:23, 3.71s/it] {'loss': 9.7803, 'grad_norm': 2.7051371217699184e-05, 'learning_rate': 4.129241847776685, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:29<17:23, 3.71s/it] 46%|████▌ | 240/520 [15:33<17:22, 3.72s/it] {'loss': 8.5096, 'grad_norm': 3.0324464225212226e-05, 'learning_rate': 4.107768621834257, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:33<17:22, 3.72s/it] 46%|████▋ | 241/520 [15:36<17:18, 3.72s/it] {'loss': 8.8862, 'grad_norm': 2.6536419059300354e-05, 'learning_rate': 4.086271781566578, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:36<17:18, 3.72s/it] 47%|████▋ | 242/520 [15:40<17:10, 3.71s/it] {'loss': 9.1313, 'grad_norm': 2.1078526389196616e-05, 'learning_rate': 4.064752162214823, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:40<17:10, 3.71s/it] 47%|████▋ | 243/520 [15:44<17:06, 3.71s/it] {'loss': 8.7192, 'grad_norm': 2.4256351618377213e-05, 'learning_rate': 4.043210599905231, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:44<17:06, 3.71s/it] 47%|████▋ | 244/520 [15:47<17:03, 3.71s/it] {'loss': 9.4363, 'grad_norm': 1.944328467773749e-05, 'learning_rate': 4.02164793161661, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:47<17:03, 3.71s/it] 47%|████▋ | 245/520 [15:51<16:58, 3.70s/it] {'loss': 8.7703, 'grad_norm': 2.6515038902927747e-05, 'learning_rate': 4.00006499514783, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:51<16:58, 3.70s/it] 47%|████▋ | 246/520 [15:55<16:51, 3.69s/it] {'loss': 10.3695, 'grad_norm': 2.270056674168991e-05, 'learning_rate': 3.978462629085257, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:55<16:51, 3.69s/it] 48%|████▊ | 247/520 [15:58<16:46, 3.69s/it] {'loss': 9.8519, 'grad_norm': 2.5377651266008338e-05, 'learning_rate': 3.956841672770181, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:58<16:46, 3.69s/it] 48%|████▊ | 248/520 [16:02<16:39, 3.67s/it] {'loss': 8.8422, 'grad_norm': 2.2084250265193755e-05, 'learning_rate': 3.935202966266199, 'epoch': 0.48} + 48%|████▊ | 248/520 [16:02<16:39, 3.67s/it] 48%|████▊ | 249/520 [16:06<16:34, 3.67s/it] {'loss': 9.4732, 'grad_norm': 2.1081202466429283e-05, 'learning_rate': 3.913547350326575, 'epoch': 0.48} + 48%|████▊ | 249/520 [16:06<16:34, 3.67s/it] 48%|████▊ | 250/520 [16:09<16:28, 3.66s/it] {'loss': 9.3747, 'grad_norm': 2.4821700149281893e-05, 'learning_rate': 3.8918756663615772, 'epoch': 0.48} + 48%|████▊ | 250/520 [16:09<16:28, 3.66s/it] 48%|████▊ | 251/520 [16:13<16:28, 3.68s/it] {'loss': 9.6523, 'grad_norm': 
2.0676421048748652e-05, 'learning_rate': 3.8701887564057826, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:13<16:28, 3.68s/it] 48%|████▊ | 252/520 [16:17<16:21, 3.66s/it] {'loss': 9.9885, 'grad_norm': 3.566703003904019e-05, 'learning_rate': 3.8484874630853585, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:17<16:21, 3.66s/it] 49%|████▊ | 253/520 [16:20<16:21, 3.68s/it] {'loss': 9.6917, 'grad_norm': 2.5027790599624646e-05, 'learning_rate': 3.826772629585327, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:20<16:21, 3.68s/it] 49%|████▉ | 254/520 [16:24<16:17, 3.67s/it] {'loss': 8.9556, 'grad_norm': 2.2199492003117408e-05, 'learning_rate': 3.8050450996168044, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:24<16:17, 3.67s/it] 49%|████▉ | 255/520 [16:28<16:14, 3.68s/it] {'loss': 9.2667, 'grad_norm': 1.779273315806029e-05, 'learning_rate': 3.783305717384212, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:28<16:14, 3.68s/it] 49%|████▉ | 256/520 [16:31<16:10, 3.68s/it] {'loss': 9.2874, 'grad_norm': 2.1147591634343553e-05, 'learning_rate': 3.761555327552485, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:31<16:10, 3.68s/it] 49%|████▉ | 257/520 [16:35<16:03, 3.66s/it] {'loss': 9.2441, 'grad_norm': 1.4678795865738857e-05, 'learning_rate': 3.739794775214248, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:35<16:03, 3.66s/it] 50%|████▉ | 258/520 [16:39<15:57, 3.65s/it] {'loss': 9.2246, 'grad_norm': 2.1300969139396222e-05, 'learning_rate': 3.718024905856983, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:39<15:57, 3.65s/it] 50%|████▉ | 259/520 [16:42<15:56, 3.66s/it] {'loss': 9.7054, 'grad_norm': 2.1012734400032098e-05, 'learning_rate': 3.6962465653301715, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:42<15:56, 3.66s/it] 50%|█████ | 260/520 [16:46<15:55, 3.68s/it] {'loss': 10.0333, 'grad_norm': 3.340243188251997e-05, 'learning_rate': 3.67446059981244, 'epoch': 0.5} + 50%|█████ | 260/520 [16:46<15:55, 3.68s/it] 50%|█████ | 261/520 [16:50<15:51, 3.67s/it] {'loss': 10.1538, 'grad_norm': 2.7597698193903988e-05, 'learning_rate': 3.6526678557786765, 'epoch': 0.5} + 50%|█████ | 261/520 [16:50<15:51, 3.67s/it] 50%|█████ | 262/520 [16:54<15:52, 3.69s/it] {'loss': 9.1035, 'grad_norm': 2.41413155150405e-05, 'learning_rate': 3.6308691799671404, 'epoch': 0.5} + 50%|█████ | 262/520 [16:54<15:52, 3.69s/it] 51%|█████ | 263/520 [16:57<15:47, 3.69s/it] {'loss': 10.2463, 'grad_norm': 3.8685981333913395e-05, 'learning_rate': 3.609065419346566, 'epoch': 0.51} + 51%|█████ | 263/520 [16:57<15:47, 3.69s/it] 51%|█████ | 264/520 [17:01<15:43, 3.69s/it] {'loss': 9.4835, 'grad_norm': 2.5427091986028805e-05, 'learning_rate': 3.5872574210832555, 'epoch': 0.51} + 51%|█████ | 264/520 [17:01<15:43, 3.69s/it] 51%|█████ | 265/520 [17:05<15:41, 3.69s/it] {'loss': 9.3512, 'grad_norm': 2.1854513263005554e-05, 'learning_rate': 3.5654460325081576, 'epoch': 0.51} + 51%|█████ | 265/520 [17:05<15:41, 3.69s/it] 51%|█████ | 266/520 [17:08<15:37, 3.69s/it] {'loss': 8.2233, 'grad_norm': 3.0288683412416883e-05, 'learning_rate': 3.543632101083953, 'epoch': 0.51} + 51%|█████ | 266/520 [17:08<15:37, 3.69s/it] 51%|█████▏ | 267/520 [17:12<15:32, 3.69s/it] {'loss': 9.0251, 'grad_norm': 1.822414000895682e-05, 'learning_rate': 3.5218164743721174, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:12<15:32, 3.69s/it] 52%|█████▏ | 268/520 [17:16<15:33, 3.70s/it] {'loss': 10.6392, 'grad_norm': 2.4917174296224513e-05, 'learning_rate': 3.5, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:16<15:33, 3.70s/it] 52%|█████▏ | 269/520 [17:19<15:27, 3.70s/it] {'loss': 9.4758, 'grad_norm': 2.2133986551739535e-05, 'learning_rate': 
3.4781835256278826, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:19<15:27, 3.70s/it] 52%|█████▏ | 270/520 [17:23<15:26, 3.70s/it] {'loss': 9.5525, 'grad_norm': 4.150571323018238e-05, 'learning_rate': 3.4563678989160476, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:23<15:26, 3.70s/it] 52%|█████▏ | 271/520 [17:27<15:32, 3.74s/it] {'loss': 9.8945, 'grad_norm': 3.583365067894869e-05, 'learning_rate': 3.434553967491843, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:27<15:32, 3.74s/it] 52%|█████▏ | 272/520 [17:31<15:36, 3.78s/it] {'loss': 10.2471, 'grad_norm': 4.222482851427223e-05, 'learning_rate': 3.4127425789167454, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:31<15:36, 3.78s/it] 52%|█████▎ | 273/520 [17:35<15:29, 3.76s/it] {'loss': 10.3456, 'grad_norm': 3.0171994610700203e-05, 'learning_rate': 3.390934580653435, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:35<15:29, 3.76s/it] 53%|█████▎ | 274/520 [17:38<15:17, 3.73s/it] {'loss': 8.9731, 'grad_norm': 3.2077227245452285e-05, 'learning_rate': 3.3691308200328605, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:38<15:17, 3.73s/it] 53%|█████▎ | 275/520 [17:42<15:16, 3.74s/it] {'loss': 9.2233, 'grad_norm': 1.6126774974666594e-05, 'learning_rate': 3.3473321442213244, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:42<15:16, 3.74s/it] 53%|█████▎ | 276/520 [17:46<15:19, 3.77s/it] {'loss': 9.7237, 'grad_norm': 3.044132803045212e-05, 'learning_rate': 3.3255394001875596, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:46<15:19, 3.77s/it] 53%|█████▎ | 277/520 [17:50<15:19, 3.79s/it] {'loss': 10.33, 'grad_norm': 2.6950708425681542e-05, 'learning_rate': 3.3037534346698285, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:50<15:19, 3.79s/it] 53%|█████▎ | 278/520 [17:54<15:23, 3.82s/it] {'loss': 8.3867, 'grad_norm': 4.051853610327512e-05, 'learning_rate': 3.2819750941430175, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:54<15:23, 3.82s/it] 54%|█████▎ | 279/520 [17:57<15:12, 3.79s/it] {'loss': 9.6609, 'grad_norm': 2.863583674920627e-05, 'learning_rate': 3.260205224785752, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:57<15:12, 3.79s/it] 54%|█████▍ | 280/520 [18:01<15:04, 3.77s/it] {'loss': 9.0123, 'grad_norm': 2.977300910766299e-05, 'learning_rate': 3.2384446724475153, 'epoch': 0.54} + 54%|█████▍ | 280/520 [18:01<15:04, 3.77s/it] 54%|█████▍ | 281/520 [18:05<15:02, 3.78s/it] {'loss': 9.4726, 'grad_norm': 2.676982431950749e-05, 'learning_rate': 3.216694282615788, 'epoch': 0.54} + 54%|█████▍ | 281/520 [18:05<15:02, 3.78s/it] 54%|█████▍ | 282/520 [18:09<15:00, 3.78s/it] {'loss': 8.5081, 'grad_norm': 3.4856478153828014e-05, 'learning_rate': 3.194954900383196, 'epoch': 0.54} + 54%|█████▍ | 282/520 [18:09<15:00, 3.78s/it] 54%|█████▍ | 283/520 [18:12<14:51, 3.76s/it] {'loss': 9.6332, 'grad_norm': 1.784223948157714e-05, 'learning_rate': 3.173227370414673, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:12<14:51, 3.76s/it] 55%|█████▍ | 284/520 [18:16<14:46, 3.76s/it] {'loss': 9.992, 'grad_norm': 2.726473677762167e-05, 'learning_rate': 3.151512536914642, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:16<14:46, 3.76s/it] 55%|█████▍ | 285/520 [18:20<14:38, 3.74s/it] {'loss': 8.9993, 'grad_norm': 1.610710545854138e-05, 'learning_rate': 3.1298112435942183, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:20<14:38, 3.74s/it] 55%|█████▌ | 286/520 [18:23<14:37, 3.75s/it] {'loss': 8.764, 'grad_norm': 2.143147928627823e-05, 'learning_rate': 3.1081243336384228, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:23<14:37, 3.75s/it] 55%|█████▌ | 287/520 [18:27<14:32, 3.74s/it] {'loss': 9.2098, 'grad_norm': 1.3807407818206671e-05, 'learning_rate': 
3.0864526496734253, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:27<14:32, 3.74s/it] 55%|█████▌ | 288/520 [18:31<14:31, 3.76s/it] {'loss': 9.9808, 'grad_norm': 2.6516485822192637e-05, 'learning_rate': 3.064797033733803, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:31<14:31, 3.76s/it] 56%|█████▌ | 289/520 [18:35<14:28, 3.76s/it] {'loss': 9.089, 'grad_norm': 2.2707345690020576e-05, 'learning_rate': 3.04315832722982, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:35<14:28, 3.76s/it] 56%|█████▌ | 290/520 [18:38<14:19, 3.74s/it] {'loss': 8.7023, 'grad_norm': 2.397347363069366e-05, 'learning_rate': 3.0215373709147437, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:38<14:19, 3.74s/it] 56%|█████▌ | 291/520 [18:42<14:13, 3.73s/it] {'loss': 8.8018, 'grad_norm': 2.629410211133861e-05, 'learning_rate': 2.9999350048521705, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:42<14:13, 3.73s/it] 56%|█████▌ | 292/520 [18:46<14:10, 3.73s/it] {'loss': 9.4336, 'grad_norm': 2.7464710448842663e-05, 'learning_rate': 2.978352068383389, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:46<14:10, 3.73s/it] 56%|█████▋ | 293/520 [18:50<14:15, 3.77s/it] {'loss': 8.9984, 'grad_norm': 2.045519688100182e-05, 'learning_rate': 2.9567894000947694, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:50<14:15, 3.77s/it] 57%|█████▋ | 294/520 [18:54<14:17, 3.80s/it] {'loss': 9.4205, 'grad_norm': 2.0667006592587333e-05, 'learning_rate': 2.9352478377851767, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:54<14:17, 3.80s/it] 57%|█████▋ | 295/520 [18:57<14:16, 3.81s/it] {'loss': 10.2257, 'grad_norm': 2.0564800112620654e-05, 'learning_rate': 2.913728218433423, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:57<14:16, 3.81s/it] 57%|█████▋ | 296/520 [19:01<14:13, 3.81s/it] {'loss': 8.5356, 'grad_norm': 2.6905885020971077e-05, 'learning_rate': 2.892231378165744, 'epoch': 0.57} + 57%|█████▋ | 296/520 [19:01<14:13, 3.81s/it] 57%|█████▋ | 297/520 [19:05<14:13, 3.83s/it] {'loss': 9.5195, 'grad_norm': 2.7178297763937733e-05, 'learning_rate': 2.8707581522233157, 'epoch': 0.57} + 57%|█████▋ | 297/520 [19:05<14:13, 3.83s/it] 57%|█████▋ | 298/520 [19:09<14:07, 3.82s/it] {'loss': 9.1511, 'grad_norm': 3.5738478059248e-05, 'learning_rate': 2.849309374929799, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:09<14:07, 3.82s/it] 57%|█████▊ | 299/520 [19:13<14:05, 3.82s/it] {'loss': 10.2255, 'grad_norm': 2.7583764312200596e-05, 'learning_rate': 2.8278858796589237, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:13<14:05, 3.82s/it] 58%|█████▊ | 300/520 [19:17<14:02, 3.83s/it] {'loss': 9.5442, 'grad_norm': 1.899716198162084e-05, 'learning_rate': 2.8064884988021093, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:17<14:02, 3.83s/it] 58%|█████▊ | 301/520 [19:20<14:00, 3.84s/it] {'loss': 9.2712, 'grad_norm': 1.926489607778931e-05, 'learning_rate': 2.7851180637361193, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:20<14:00, 3.84s/it] 58%|█████▊ | 302/520 [19:24<14:00, 3.86s/it] {'loss': 10.2014, 'grad_norm': 2.2146599935313285e-05, 'learning_rate': 2.7637754047907652, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:24<14:00, 3.86s/it] 58%|█████▊ | 303/520 [19:28<13:54, 3.85s/it] {'loss': 8.9278, 'grad_norm': 1.5217903177468646e-05, 'learning_rate': 2.7424613512166403, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:28<13:54, 3.85s/it] 58%|█████▊ | 304/520 [19:32<13:52, 3.86s/it] {'loss': 10.0625, 'grad_norm': 2.9332168327383848e-05, 'learning_rate': 2.7211767311529, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:32<13:52, 3.86s/it] 59%|█████▊ | 305/520 [19:36<13:41, 3.82s/it] {'loss': 9.8325, 'grad_norm': 1.9807629106972095e-05, 'learning_rate': 
2.699922371595087, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:36<13:41, 3.82s/it] 59%|█████▉ | 306/520 [19:40<13:34, 3.81s/it] {'loss': 9.5701, 'grad_norm': 2.0165035645122858e-05, 'learning_rate': 2.6786990983629977, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:40<13:34, 3.81s/it] 59%|█████▉ | 307/520 [19:43<13:23, 3.77s/it] {'loss': 9.1506, 'grad_norm': 1.9209200588152142e-05, 'learning_rate': 2.6575077360685952, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:43<13:23, 3.77s/it] 59%|█████▉ | 308/520 [19:47<13:17, 3.76s/it] {'loss': 9.404, 'grad_norm': 1.8895863037392166e-05, 'learning_rate': 2.636349108083972, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:47<13:17, 3.76s/it] 59%|█████▉ | 309/520 [19:52<14:02, 3.99s/it] {'loss': 8.9465, 'grad_norm': 2.1800124139949898e-05, 'learning_rate': 2.615224036509358, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:52<14:02, 3.99s/it] 60%|█████▉ | 310/520 [19:55<13:42, 3.91s/it] {'loss': 9.062, 'grad_norm': 2.566665790561017e-05, 'learning_rate': 2.594133342141177, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:55<13:42, 3.91s/it] 60%|█████▉ | 311/520 [19:59<13:28, 3.87s/it] {'loss': 9.222, 'grad_norm': 1.7388261841255018e-05, 'learning_rate': 2.5730778444401543, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:59<13:28, 3.87s/it] 60%|██████ | 312/520 [20:03<13:43, 3.96s/it] {'loss': 8.9207, 'grad_norm': 2.0253604371139954e-05, 'learning_rate': 2.5520583614994825, 'epoch': 0.6} + 60%|██████ | 312/520 [20:03<13:43, 3.96s/it] 60%|██████ | 313/520 [20:07<13:28, 3.91s/it] {'loss': 8.2338, 'grad_norm': 3.553360139041388e-05, 'learning_rate': 2.5310757100130274, 'epoch': 0.6} + 60%|██████ | 313/520 [20:07<13:28, 3.91s/it] 60%|██████ | 314/520 [20:11<13:36, 3.96s/it] {'loss': 9.0921, 'grad_norm': 1.5520943530586212e-05, 'learning_rate': 2.5101307052436037, 'epoch': 0.6} + 60%|██████ | 314/520 [20:11<13:36, 3.96s/it] 61%|██████ | 315/520 [20:15<13:17, 3.89s/it] {'loss': 10.3172, 'grad_norm': 2.0235070099992036e-05, 'learning_rate': 2.489224160991296, 'epoch': 0.61} + 61%|██████ | 315/520 [20:15<13:17, 3.89s/it] 61%|██████ | 316/520 [20:18<13:00, 3.82s/it] {'loss': 8.9438, 'grad_norm': 1.616445986837258e-05, 'learning_rate': 2.468356889561835, 'epoch': 0.61} + 61%|██████ | 316/520 [20:18<13:00, 3.82s/it] 61%|██████ | 317/520 [20:22<12:49, 3.79s/it] {'loss': 8.3365, 'grad_norm': 3.1679252082938636e-05, 'learning_rate': 2.4475297017350446, 'epoch': 0.61} + 61%|██████ | 317/520 [20:22<12:49, 3.79s/it] 61%|██████ | 318/520 [20:26<12:42, 3.78s/it] {'loss': 9.8607, 'grad_norm': 2.1964327837326813e-05, 'learning_rate': 2.4267434067333307, 'epoch': 0.61} + 61%|██████ | 318/520 [20:26<12:42, 3.78s/it] 61%|██████▏ | 319/520 [20:30<12:59, 3.88s/it] {'loss': 8.7699, 'grad_norm': 3.1145232131883005e-05, 'learning_rate': 2.4059988121902447, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:30<12:59, 3.88s/it] 62%|██████▏ | 320/520 [20:34<12:48, 3.84s/it] {'loss': 9.1211, 'grad_norm': 1.7193029060745397e-05, 'learning_rate': 2.385296724119105, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:34<12:48, 3.84s/it] 62%|██████▏ | 321/520 [20:38<12:37, 3.81s/it] {'loss': 9.2873, 'grad_norm': 1.8506683541986832e-05, 'learning_rate': 2.3646379468816754, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:38<12:37, 3.81s/it] 62%|██████▏ | 322/520 [20:41<12:31, 3.80s/it] {'loss': 10.0344, 'grad_norm': 2.3708046949058055e-05, 'learning_rate': 2.3440232831569165, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:41<12:31, 3.80s/it] 62%|██████▏ | 323/520 [20:45<12:24, 3.78s/it] {'loss': 10.0825, 'grad_norm': 2.1648553103292466e-05, 
'learning_rate': 2.323453533909793, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:45<12:24, 3.78s/it] 62%|██████▏ | 324/520 [20:49<12:16, 3.76s/it] {'loss': 9.0331, 'grad_norm': 1.7864114960623842e-05, 'learning_rate': 2.3029294983601596, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:49<12:16, 3.76s/it] 62%|██████▎ | 325/520 [20:52<12:11, 3.75s/it] {'loss': 9.3728, 'grad_norm': 1.6887729480249663e-05, 'learning_rate': 2.282451973951704, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:52<12:11, 3.75s/it] 63%|██████▎ | 326/520 [20:56<12:10, 3.76s/it] {'loss': 9.4163, 'grad_norm': 1.5682811542313924e-05, 'learning_rate': 2.2620217563209652, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:56<12:10, 3.76s/it] 63%|██████▎ | 327/520 [21:00<12:05, 3.76s/it] {'loss': 10.1396, 'grad_norm': 2.5708738592255205e-05, 'learning_rate': 2.2416396392664137, 'epoch': 0.63} + 63%|██████▎ | 327/520 [21:00<12:05, 3.76s/it] 63%|██████▎ | 328/520 [21:04<11:57, 3.74s/it] {'loss': 9.6247, 'grad_norm': 1.5819374701610026e-05, 'learning_rate': 2.2213064147176174, 'epoch': 0.63} + 63%|██████▎ | 328/520 [21:04<11:57, 3.74s/it] 63%|██████▎ | 329/520 [21:07<11:52, 3.73s/it] {'loss': 8.5858, 'grad_norm': 2.5206231057622643e-05, 'learning_rate': 2.2010228727044674, 'epoch': 0.63} + 63%|██████▎ | 329/520 [21:07<11:52, 3.73s/it] 63%|██████▎ | 330/520 [21:11<11:48, 3.73s/it] {'loss': 9.2969, 'grad_norm': 1.551649969858218e-05, 'learning_rate': 2.1807898013264815, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:11<11:48, 3.73s/it] 64%|██████▎ | 331/520 [21:15<11:43, 3.72s/it] {'loss': 9.2247, 'grad_norm': 1.6639799765125444e-05, 'learning_rate': 2.160607986722186, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:15<11:43, 3.72s/it] 64%|██████▍ | 332/520 [21:19<11:37, 3.71s/it] {'loss': 10.1311, 'grad_norm': 1.9819828224929714e-05, 'learning_rate': 2.1404782130385684, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:19<11:37, 3.71s/it] 64%|██████▍ | 333/520 [21:22<11:35, 3.72s/it] {'loss': 9.8777, 'grad_norm': 2.060641647379231e-05, 'learning_rate': 2.1204012624006126, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:22<11:35, 3.72s/it] 64%|██████▍ | 334/520 [21:26<11:33, 3.73s/it] {'loss': 9.0785, 'grad_norm': 1.7986903979981166e-05, 'learning_rate': 2.100377914880907, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:26<11:33, 3.73s/it] 64%|██████▍ | 335/520 [21:30<11:32, 3.74s/it] {'loss': 9.1824, 'grad_norm': 1.4263173559804263e-05, 'learning_rate': 2.080408948469338, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:30<11:32, 3.74s/it] 65%|██████▍ | 336/520 [21:34<11:30, 3.76s/it] {'loss': 8.9905, 'grad_norm': 1.4664555216046301e-05, 'learning_rate': 2.0604951390428603, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:34<11:30, 3.76s/it] 65%|██████▍ | 337/520 [21:37<11:30, 3.77s/it] {'loss': 9.1867, 'grad_norm': 2.007095929015885e-05, 'learning_rate': 2.0406372603353526, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:37<11:30, 3.77s/it] 65%|██████▌ | 338/520 [21:41<11:28, 3.78s/it] {'loss': 9.1689, 'grad_norm': 1.1469874438422636e-05, 'learning_rate': 2.0208360839075525, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:41<11:28, 3.78s/it] 65%|██████▌ | 339/520 [21:45<11:23, 3.78s/it] {'loss': 9.4843, 'grad_norm': 1.6635077269311174e-05, 'learning_rate': 2.00109237911708, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:45<11:23, 3.78s/it] 65%|██████▌ | 340/520 [21:49<11:16, 3.76s/it] {'loss': 8.9393, 'grad_norm': 1.8791451794619047e-05, 'learning_rate': 1.9814069130885468, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:49<11:16, 3.76s/it] 66%|██████▌ | 341/520 [21:52<11:12, 3.76s/it] {'loss': 9.2195, 
'grad_norm': 3.4542911132920883e-05, 'learning_rate': 1.9617804506837422, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:52<11:12, 3.76s/it] 66%|██████▌ | 342/520 [21:56<11:07, 3.75s/it] {'loss': 10.3915, 'grad_norm': 1.909944035917687e-05, 'learning_rate': 1.9422137544719265, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:56<11:07, 3.75s/it] 66%|██████▌ | 343/520 [22:00<11:05, 3.76s/it] {'loss': 9.913, 'grad_norm': 2.3867418287597803e-05, 'learning_rate': 1.922707584700191, 'epoch': 0.66} + 66%|██████▌ | 343/520 [22:00<11:05, 3.76s/it] 66%|██████▌ | 344/520 [22:04<11:00, 3.75s/it] {'loss': 8.8917, 'grad_norm': 1.545423571921113e-05, 'learning_rate': 1.9032626992639294, 'epoch': 0.66} + 66%|██████▌ | 344/520 [22:04<11:00, 3.75s/it] 66%|██████▋ | 345/520 [22:07<10:55, 3.75s/it] {'loss': 9.3058, 'grad_norm': 1.966247622686348e-05, 'learning_rate': 1.883879853677382, 'epoch': 0.66} + 66%|██████▋ | 345/520 [22:07<10:55, 3.75s/it] 67%|██████▋ | 346/520 [22:11<10:53, 3.76s/it] {'loss': 10.0892, 'grad_norm': 3.01239282209579e-05, 'learning_rate': 1.8645598010442828, 'epoch': 0.67} + 67%|██████▋ | 346/520 [22:11<10:53, 3.76s/it] 67%|██████▋ | 347/520 [22:15<10:48, 3.75s/it] {'loss': 8.3959, 'grad_norm': 2.3501109048389263e-05, 'learning_rate': 1.845303292028606, 'epoch': 0.67} + 67%|██████▋ | 347/520 [22:15<10:48, 3.75s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:19<10:43, 3.74s/it] {'loss': 9.5408, 'grad_norm': 3.1045627506693436e-05, 'learning_rate': 1.8261110748253873, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:19<10:43, 3.74s/it] 67%|██████▋ | 349/520 [22:22<10:38, 3.73s/it] {'loss': 9.7488, 'grad_norm': 2.0908352828538308e-05, 'learning_rate': 1.8069838951316606, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:22<10:38, 3.73s/it] 67%|██████▋ | 350/520 [22:26<10:33, 3.73s/it] {'loss': 9.2615, 'grad_norm': 1.6471623011523568e-05, 'learning_rate': 1.7879224961174887, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:26<10:33, 3.73s/it] 68%|██████▊ | 351/520 [22:30<10:30, 3.73s/it] {'loss': 8.8677, 'grad_norm': 2.2444286897771254e-05, 'learning_rate': 1.7689276183970741, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:30<10:30, 3.73s/it] 68%|██████▊ | 352/520 [22:34<10:27, 3.73s/it] {'loss': 9.2716, 'grad_norm': 1.7157053128812935e-05, 'learning_rate': 1.7500000000000009, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:34<10:27, 3.73s/it] 68%|██████▊ | 353/520 [22:37<10:23, 3.74s/it] {'loss': 9.6629, 'grad_norm': 2.9668443747018004e-05, 'learning_rate': 1.7311403763425435, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:37<10:23, 3.74s/it] 68%|██████▊ | 354/520 [22:41<10:17, 3.72s/it] {'loss': 10.1877, 'grad_norm': 2.29677935156991e-05, 'learning_rate': 1.7123494801991013, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:41<10:17, 3.72s/it] 68%|██████▊ | 355/520 [22:45<10:13, 3.72s/it] {'loss': 8.9613, 'grad_norm': 2.0652792260129434e-05, 'learning_rate': 1.6936280416737264, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:45<10:13, 3.72s/it] 68%|██████▊ | 356/520 [22:48<10:05, 3.69s/it] {'loss': 9.2101, 'grad_norm': 1.6218977798534696e-05, 'learning_rate': 1.674976788171757, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:48<10:05, 3.69s/it] 69%|██████▊ | 357/520 [22:52<10:03, 3.70s/it] {'loss': 8.537, 'grad_norm': 2.4781507634246726e-05, 'learning_rate': 1.6563964443715473, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:52<10:03, 3.70s/it] 69%|██████▉ | 358/520 [22:56<09:58, 
3.69s/it] {'loss': 8.964, 'grad_norm': 1.6661898113706812e-05, 'learning_rate': 1.6378877321963223, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:56<09:58, 3.69s/it] 69%|██████▉ | 359/520 [22:59<09:55, 3.70s/it] {'loss': 9.9776, 'grad_norm': 2.012314938352977e-05, 'learning_rate': 1.619451370786116, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:59<09:55, 3.70s/it] 69%|██████▉ | 360/520 [23:03<09:51, 3.70s/it] {'loss': 10.3439, 'grad_norm': 2.3280207350124105e-05, 'learning_rate': 1.6010880764698423, 'epoch': 0.69} + 69%|██████▉ | 360/520 [23:03<09:51, 3.70s/it] 69%|██████▉ | 361/520 [23:07<09:49, 3.71s/it] {'loss': 10.0091, 'grad_norm': 2.465431898517352e-05, 'learning_rate': 1.5827985627374508, 'epoch': 0.69} + 69%|██████▉ | 361/520 [23:07<09:49, 3.71s/it] 70%|██████▉ | 362/520 [23:11<09:44, 3.70s/it] {'loss': 8.9734, 'grad_norm': 1.3997005552211038e-05, 'learning_rate': 1.564583540212212, 'epoch': 0.7} + 70%|██████▉ | 362/520 [23:11<09:44, 3.70s/it] 70%|██████▉ | 363/520 [23:14<09:42, 3.71s/it] {'loss': 9.4089, 'grad_norm': 1.722646523530266e-05, 'learning_rate': 1.5464437166231066, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:14<09:42, 3.71s/it] 70%|███████ | 364/520 [23:18<09:38, 3.71s/it] {'loss': 10.2408, 'grad_norm': 2.8828159331355085e-05, 'learning_rate': 1.5283797967773227, 'epoch': 0.7} + 70%|███████ | 364/520 [23:18<09:38, 3.71s/it] 70%|███████ | 365/520 [23:22<09:35, 3.72s/it] {'loss': 9.5662, 'grad_norm': 1.4738642957988737e-05, 'learning_rate': 1.5103924825328772, 'epoch': 0.7} + 70%|███████ | 365/520 [23:22<09:35, 3.72s/it] 70%|███████ | 366/520 [23:25<09:33, 3.72s/it] {'loss': 9.3581, 'grad_norm': 1.4331448740882964e-05, 'learning_rate': 1.4924824727713397, 'epoch': 0.7} + 70%|███████ | 366/520 [23:25<09:33, 3.72s/it] 71%|███████ | 367/520 [23:29<09:28, 3.71s/it] {'loss': 9.5992, 'grad_norm': 1.9780544313529962e-05, 'learning_rate': 1.47465046337068, 'epoch': 0.71} + 71%|███████ | 367/520 [23:29<09:28, 3.71s/it] 71%|███████ | 368/520 [23:33<09:24, 3.71s/it] {'loss': 8.8671, 'grad_norm': 1.4799258862495264e-05, 'learning_rate': 1.4568971471782364, 'epoch': 0.71} + 71%|███████ | 368/520 [23:33<09:24, 3.71s/it] 71%|███████ | 369/520 [23:37<09:19, 3.71s/it] {'loss': 9.4986, 'grad_norm': 3.548658444574988e-05, 'learning_rate': 1.4392232139837837, 'epoch': 0.71} + 71%|███████ | 369/520 [23:37<09:19, 3.71s/it] 71%|███████ | 370/520 [23:40<09:13, 3.69s/it] {'loss': 9.0137, 'grad_norm': 1.180374241081682e-05, 'learning_rate': 1.4216293504927449, 'epoch': 0.71} + 71%|███████ | 370/520 [23:40<09:13, 3.69s/it] 71%|███████▏ | 371/520 [23:44<09:11, 3.70s/it] {'loss': 9.2894, 'grad_norm': 1.8752159043564495e-05, 'learning_rate': 1.404116240299499, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:44<09:11, 3.70s/it] 72%|███████▏ | 372/520 [23:48<09:07, 3.70s/it] {'loss': 10.3041, 'grad_norm': 2.0051192839089113e-05, 'learning_rate': 1.3866845638608285, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:48<09:07, 3.70s/it] 72%|███████▏ | 373/520 [23:51<09:05, 3.71s/it] {'loss': 10.0666, 'grad_norm': 2.4653457550010578e-05, 'learning_rate': 1.3693349984694776, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:51<09:05, 3.71s/it] 72%|███████▏ | 374/520 [23:55<09:07, 3.75s/it] {'loss': 9.1121, 'grad_norm': 1.3548481940462649e-05, 'learning_rate': 1.3520682182278345, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:55<09:07, 3.75s/it] 72%|███████▏ | 375/520 [23:59<09:09, 3.79s/it] {'loss': 8.7138, 'grad_norm': 1.4531278606013277e-05, 'learning_rate': 1.3348848940217413, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:59<09:09, 
3.79s/it] 72%|███████▏ | 376/520 [24:03<09:08, 3.81s/it] {'loss': 9.1547, 'grad_norm': 1.4114173593828369e-05, 'learning_rate': 1.3177856934944328, 'epoch': 0.72} + 72%|███████▏ | 376/520 [24:03<09:08, 3.81s/it] 72%|███████▎ | 377/520 [24:07<09:06, 3.82s/it] {'loss': 9.1846, 'grad_norm': 1.3796037196231451e-05, 'learning_rate': 1.3007712810205845, 'epoch': 0.72} + 72%|███████▎ | 377/520 [24:07<09:06, 3.82s/it] 73%|███████▎ | 378/520 [24:11<08:57, 3.79s/it] {'loss': 9.507, 'grad_norm': 1.650733786153466e-05, 'learning_rate': 1.2838423176805112, 'epoch': 0.73} + 73%|███████▎ | 378/520 [24:11<08:57, 3.79s/it] 73%|███████▎ | 379/520 [24:14<08:50, 3.77s/it] {'loss': 9.4894, 'grad_norm': 1.66100650458336e-05, 'learning_rate': 1.2669994612344704, 'epoch': 0.73} + 73%|███████▎ | 379/520 [24:14<08:50, 3.77s/it] 73%|███████▎ | 380/520 [24:18<08:43, 3.74s/it] {'loss': 10.1395, 'grad_norm': 2.2655154591254012e-05, 'learning_rate': 1.2502433660971122, 'epoch': 0.73} + 73%|███████▎ | 380/520 [24:18<08:43, 3.74s/it] 73%|███████▎ | 381/520 [24:22<08:42, 3.76s/it] {'loss': 9.3751, 'grad_norm': 1.7355335202399687e-05, 'learning_rate': 1.233574683312054, 'epoch': 0.73} + 73%|███████▎ | 381/520 [24:22<08:42, 3.76s/it] 73%|███████▎ | 382/520 [24:25<08:37, 3.75s/it] {'loss': 10.2675, 'grad_norm': 2.3989776757743992e-05, 'learning_rate': 1.216994060526577, 'epoch': 0.73} + 73%|███████▎ | 382/520 [24:25<08:37, 3.75s/it] 74%|███████▎ | 383/520 [24:29<08:32, 3.74s/it] {'loss': 8.9018, 'grad_norm': 3.4157313891077733e-05, 'learning_rate': 1.2005021419664688, 'epoch': 0.74} + 74%|███████▎ | 383/520 [24:29<08:32, 3.74s/it] 74%|███████▍ | 384/520 [24:33<08:28, 3.74s/it] {'loss': 11.171, 'grad_norm': 6.262414390637305e-05, 'learning_rate': 1.184099568410993, 'epoch': 0.74} + 74%|███████▍ | 384/520 [24:33<08:28, 3.74s/it] 74%|███████▍ | 385/520 [24:37<08:24, 3.74s/it] {'loss': 9.2326, 'grad_norm': 7.048221915731631e-05, 'learning_rate': 1.1677869771679863, 'epoch': 0.74} + 74%|███████▍ | 385/520 [24:37<08:24, 3.74s/it] 74%|███████▍ | 386/520 [24:40<08:20, 3.73s/it] {'loss': 8.6407, 'grad_norm': 2.0979698222831915e-05, 'learning_rate': 1.1515650020491053, 'epoch': 0.74} + 74%|███████▍ | 386/520 [24:40<08:20, 3.73s/it] 74%|███████▍ | 387/520 [24:44<08:17, 3.74s/it] {'loss': 10.618, 'grad_norm': 1.6203693223051073e-05, 'learning_rate': 1.1354342733451892, 'epoch': 0.74} + 74%|███████▍ | 387/520 [24:44<08:17, 3.74s/it] 75%|███████▍ | 388/520 [24:48<08:13, 3.74s/it] {'loss': 8.9038, 'grad_norm': 1.2950693628372793e-05, 'learning_rate': 1.1193954178017815, 'epoch': 0.75} + 75%|███████▍ | 388/520 [24:48<08:13, 3.74s/it] 75%|███████▍ | 389/520 [24:52<08:10, 3.74s/it] {'loss': 9.4319, 'grad_norm': 1.8894252483698034e-05, 'learning_rate': 1.1034490585947727, 'epoch': 0.75} + 75%|███████▍ | 389/520 [24:52<08:10, 3.74s/it] 75%|███████▌ | 390/520 [24:55<08:06, 3.74s/it] {'loss': 9.1809, 'grad_norm': 1.396591760995035e-05, 'learning_rate': 1.0875958153061855, 'epoch': 0.75} + 75%|███████▌ | 390/520 [24:55<08:06, 3.74s/it] 75%|███████▌ | 391/520 [24:59<08:03, 3.75s/it] {'loss': 9.7088, 'grad_norm': 1.884675007786905e-05, 'learning_rate': 1.0718363039001042, 'epoch': 0.75} + 75%|███████▌ | 391/520 [24:59<08:03, 3.75s/it] 75%|███████▌ | 392/520 [25:03<07:58, 3.74s/it] {'loss': 8.8571, 'grad_norm': 1.4424616270301762e-05, 'learning_rate': 1.0561711366987454, 'epoch': 0.75} + 75%|███████▌ | 392/520 [25:03<07:58, 3.74s/it] 76%|███████▌ | 393/520 [25:07<07:54, 3.74s/it] {'loss': 9.716, 'grad_norm': 2.6219849623233388e-05, 'learning_rate': 
1.0406009223586579, 'epoch': 0.76} + 76%|███████▌ | 393/520 [25:07<07:54, 3.74s/it] 76%|███████▌ | 394/520 [25:10<07:49, 3.73s/it] {'loss': 9.1467, 'grad_norm': 1.614348309016865e-05, 'learning_rate': 1.0251262658470839, 'epoch': 0.76} + 76%|███████▌ | 394/520 [25:10<07:49, 3.73s/it] 76%|███████▌ | 395/520 [25:14<07:45, 3.72s/it] {'loss': 9.0383, 'grad_norm': 2.1940791981374355e-05, 'learning_rate': 1.0097477684184453, 'epoch': 0.76} + 76%|███████▌ | 395/520 [25:14<07:45, 3.72s/it] 76%|███████▌ | 396/520 [25:18<07:41, 3.72s/it] {'loss': 9.396, 'grad_norm': 2.5807285456260895e-05, 'learning_rate': 0.9944660275909855, 'epoch': 0.76} + 76%|███████▌ | 396/520 [25:18<07:41, 3.72s/it] 76%|███████▋ | 397/520 [25:21<07:37, 3.72s/it] {'loss': 9.3811, 'grad_norm': 2.8505947922127774e-05, 'learning_rate': 0.9792816371235576, 'epoch': 0.76} + 76%|███████▋ | 397/520 [25:21<07:37, 3.72s/it] 77%|███████▋ | 398/520 [25:25<07:33, 3.71s/it] {'loss': 9.5715, 'grad_norm': 1.9710970337607586e-05, 'learning_rate': 0.9641951869925457, 'epoch': 0.77} + 77%|███████▋ | 398/520 [25:25<07:33, 3.71s/it] 77%|███████▋ | 399/520 [25:29<07:36, 3.77s/it] {'loss': 9.9812, 'grad_norm': 3.014488821348026e-05, 'learning_rate': 0.9492072633689508, 'epoch': 0.77} + 77%|███████▋ | 399/520 [25:29<07:36, 3.77s/it] 77%|███████▋ | 400/520 [25:33<07:41, 3.85s/it] {'loss': 9.7824, 'grad_norm': 2.4904375253935585e-05, 'learning_rate': 0.9343184485956086, 'epoch': 0.77} + 77%|███████▋ | 400/520 [25:33<07:41, 3.85s/it] 77%|███████▋ | 401/520 [25:37<07:39, 3.87s/it] {'loss': 8.193, 'grad_norm': 2.137549059579247e-05, 'learning_rate': 0.9195293211645661, 'epoch': 0.77} + 77%|███████▋ | 401/520 [25:37<07:39, 3.87s/it] 77%|███████▋ | 402/520 [25:41<07:39, 3.90s/it] {'loss': 8.7448, 'grad_norm': 1.3407519140804452e-05, 'learning_rate': 0.9048404556946064, 'epoch': 0.77} + 77%|███████▋ | 402/520 [25:41<07:39, 3.90s/it] 78%|███████▊ | 403/520 [25:45<07:37, 3.91s/it] {'loss': 8.9783, 'grad_norm': 1.580027665757923e-05, 'learning_rate': 0.8902524229089204, 'epoch': 0.78} + 78%|███████▊ | 403/520 [25:45<07:37, 3.91s/it] 78%|███████▊ | 404/520 [25:49<07:33, 3.91s/it] {'loss': 8.875, 'grad_norm': 1.9363971971546696e-05, 'learning_rate': 0.8757657896129298, 'epoch': 0.78} + 78%|███████▊ | 404/520 [25:49<07:33, 3.91s/it] 78%|███████▊ | 405/520 [25:53<07:32, 3.93s/it] {'loss': 9.6826, 'grad_norm': 2.2524463045018032e-05, 'learning_rate': 0.8613811186722706, 'epoch': 0.78} + 78%|███████▊ | 405/520 [25:53<07:32, 3.93s/it] 78%|███████▊ | 406/520 [25:56<07:19, 3.86s/it] {'loss': 9.706, 'grad_norm': 2.837418735181795e-05, 'learning_rate': 0.8470989689909141, 'epoch': 0.78} + 78%|███████▊ | 406/520 [25:56<07:19, 3.86s/it] 78%|███████▊ | 407/520 [26:00<07:12, 3.83s/it] {'loss': 9.8866, 'grad_norm': 2.1176199527791047e-05, 'learning_rate': 0.8329198954894622, 'epoch': 0.78} + 78%|███████▊ | 407/520 [26:00<07:12, 3.83s/it] 78%|███████▊ | 408/520 [26:04<07:03, 3.78s/it] {'loss': 9.0055, 'grad_norm': 1.5724361930259943e-05, 'learning_rate': 0.8188444490835773, 'epoch': 0.78} + 78%|███████▊ | 408/520 [26:04<07:03, 3.78s/it] 79%|███████▊ | 409/520 [26:08<06:57, 3.76s/it] {'loss': 9.8489, 'grad_norm': 1.8869800925798287e-05, 'learning_rate': 0.8048731766625803, 'epoch': 0.79} + 79%|███████▊ | 409/520 [26:08<06:57, 3.76s/it] 79%|███████▉ | 410/520 [26:11<06:51, 3.74s/it] {'loss': 8.4958, 'grad_norm': 1.7963460825348774e-05, 'learning_rate': 0.7910066210682041, 'epoch': 0.79} + 79%|███████▉ | 410/520 [26:11<06:51, 3.74s/it] 79%|███████▉ | 411/520 [26:15<06:49, 
3.75s/it] {'loss': 9.4838, 'grad_norm': 1.4515289163781245e-05, 'learning_rate': 0.7772453210734984, 'epoch': 0.79} + 79%|███████▉ | 411/520 [26:15<06:49, 3.75s/it] 79%|███████▉ | 412/520 [26:19<06:44, 3.74s/it] {'loss': 9.3475, 'grad_norm': 1.7371389497018575e-05, 'learning_rate': 0.7635898113618957, 'epoch': 0.79} + 79%|███████▉ | 412/520 [26:19<06:44, 3.74s/it] 79%|███████▉ | 413/520 [26:22<06:39, 3.73s/it] {'loss': 10.4377, 'grad_norm': 2.2801712750788126e-05, 'learning_rate': 0.7500406225064428, 'epoch': 0.79} + 79%|███████▉ | 413/520 [26:22<06:39, 3.73s/it] 80%|███████▉ | 414/520 [26:26<06:34, 3.72s/it] {'loss': 9.1206, 'grad_norm': 2.9098158431615854e-05, 'learning_rate': 0.7365982809491765, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:26<06:34, 3.72s/it] 80%|███████▉ | 415/520 [26:30<06:30, 3.72s/it] {'loss': 8.6344, 'grad_norm': 1.1261855116722807e-05, 'learning_rate': 0.7232633089806773, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:30<06:30, 3.72s/it] 80%|████████ | 416/520 [26:34<06:26, 3.71s/it] {'loss': 9.271, 'grad_norm': 2.1207479002752985e-05, 'learning_rate': 0.7100362247197725, 'epoch': 0.8} + 80%|████████ | 416/520 [26:34<06:26, 3.71s/it] 80%|████████ | 417/520 [26:37<06:21, 3.71s/it] {'loss': 9.2189, 'grad_norm': 1.5202530479041534e-05, 'learning_rate': 0.6969175420934025, 'epoch': 0.8} + 80%|████████ | 417/520 [26:37<06:21, 3.71s/it] 80%|████████ | 418/520 [26:41<06:18, 3.71s/it] {'loss': 9.4199, 'grad_norm': 1.7684487972920974e-05, 'learning_rate': 0.6839077708166608, 'epoch': 0.8} + 80%|████████ | 418/520 [26:41<06:18, 3.71s/it] 81%|████████ | 419/520 [26:45<06:13, 3.70s/it] {'loss': 9.5977, 'grad_norm': 1.784618392674478e-05, 'learning_rate': 0.6710074163729818, 'epoch': 0.81} + 81%|████████ | 419/520 [26:45<06:13, 3.70s/it] 81%|████████ | 420/520 [26:48<06:09, 3.70s/it] {'loss': 8.9232, 'grad_norm': 1.040090968073547e-05, 'learning_rate': 0.6582169799945022, 'epoch': 0.81} + 81%|████████ | 420/520 [26:48<06:09, 3.70s/it] 81%|████████ | 421/520 [26:52<06:05, 3.70s/it] {'loss': 8.7033, 'grad_norm': 2.0532241580312337e-05, 'learning_rate': 0.6455369586425894, 'epoch': 0.81} + 81%|████████ | 421/520 [26:52<06:05, 3.70s/it] 81%|████████ | 422/520 [26:56<06:02, 3.70s/it] {'loss': 8.8311, 'grad_norm': 1.0234738737206944e-05, 'learning_rate': 0.6329678449885283, 'epoch': 0.81} + 81%|████████ | 422/520 [26:56<06:02, 3.70s/it] 81%|████████▏ | 423/520 [26:59<05:59, 3.70s/it] {'loss': 9.6185, 'grad_norm': 1.924689069244178e-05, 'learning_rate': 0.6205101273943833, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:59<05:59, 3.70s/it] 82%|████████▏ | 424/520 [27:03<05:55, 3.71s/it] {'loss': 10.268, 'grad_norm': 2.057898151326792e-05, 'learning_rate': 0.6081642898940186, 'epoch': 0.82} + 82%|████████▏ | 424/520 [27:03<05:55, 3.71s/it] 82%|████████▏ | 425/520 [27:07<05:50, 3.69s/it] {'loss': 8.8353, 'grad_norm': 1.2423969021299413e-05, 'learning_rate': 0.5959308121742938, 'epoch': 0.82} + 82%|████████▏ | 425/520 [27:07<05:50, 3.69s/it] 82%|████████▏ | 426/520 [27:10<05:44, 3.67s/it] {'loss': 9.7228, 'grad_norm': 1.939322319293713e-05, 'learning_rate': 0.5838101695564291, 'epoch': 0.82} + 82%|████████▏ | 426/520 [27:10<05:44, 3.67s/it] 82%|████████▏ | 427/520 [27:14<05:41, 3.67s/it] {'loss': 8.5055, 'grad_norm': 1.655154681463617e-05, 'learning_rate': 0.5718028329775309, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:14<05:41, 3.67s/it] 82%|████████▏ | 428/520 [27:18<05:36, 3.66s/it] {'loss': 8.544, 'grad_norm': 1.3107997839227614e-05, 'learning_rate': 0.5599092689723001, 'epoch': 0.82} + 
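
The stream above interleaves each step's stats dict with duplicated tqdm fragments, which makes the series hard to eyeball. A minimal sketch for mining the per-step records out of a flattened log like this one (hypothetical helper, assuming only the `{'loss': ..., 'grad_norm': ..., 'learning_rate': ..., 'epoch': ...}` format visible above):

```python
import ast
import re

# Hypothetical log-mining helper: pull every per-step stats dict
# (loss / grad_norm / learning_rate / epoch) out of a flattened
# tqdm + Trainer log like the one above.
STEP_RE = re.compile(r"\{'loss':.*?\}")  # step dicts have no nested braces

def parse_step_stats(log_text: str) -> list[dict]:
    records = []
    for match in STEP_RE.finditer(log_text):
        # The dicts use Python repr (single quotes, sci notation), not JSON.
        records.append(ast.literal_eval(match.group(0)))
    return records

# Usage: losses = [r["loss"] for r in parse_step_stats(open("run.log").read())]
```

The `{'train_runtime': ...}` summary dict at the end of a run does not start with `'loss'`, so the pattern skips it by construction.
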
82%|████████▏ | 428/520 [27:18<05:36, 3.66s/it] 82%|████████▎ | 429/520 [27:21<05:34, 3.67s/it] {'loss': 9.0449, 'grad_norm': 1.0915757944656566e-05, 'learning_rate': 0.5481299396549008, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:21<05:34, 3.67s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:25<05:31, 3.69s/it] {'loss': 8.3466, 'grad_norm': 1.990035431341987e-05, 'learning_rate': 0.5364653027010056, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:25<05:31, 3.69s/it] 83%|████████▎ | 431/520 [27:29<05:30, 3.71s/it] {'loss': 10.0138, 'grad_norm': 1.2731772499103877e-05, 'learning_rate': 0.5249158113300181, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:29<05:30, 3.71s/it] 83%|████████▎ | 432/520 [27:33<05:31, 3.77s/it] {'loss': 8.7238, 'grad_norm': 1.1825522914830537e-05, 'learning_rate': 0.5134819142874554, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:33<05:31, 3.77s/it] 83%|████████▎ | 433/520 [27:37<05:30, 3.80s/it] {'loss': 9.1312, 'grad_norm': 1.2175607140561728e-05, 'learning_rate': 0.5021640558275203, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:37<05:30, 3.80s/it] 83%|████████▎ | 434/520 [27:41<05:28, 3.82s/it] {'loss': 8.0539, 'grad_norm': 2.119562145370399e-05, 'learning_rate': 0.49096267569583396, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:41<05:28, 3.82s/it] 84%|████████▎ | 435/520 [27:44<05:25, 3.83s/it] {'loss': 9.4527, 'grad_norm': 1.4800522357698314e-05, 'learning_rate': 0.47987820911235435, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:44<05:25, 3.83s/it] 84%|████████▍ | 436/520 [27:48<05:18, 3.80s/it] {'loss': 8.7965, 'grad_norm': 1.0855615013930891e-05, 'learning_rate': 0.4689110867544645, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:48<05:18, 3.80s/it] 84%|████████▍ | 437/520 [27:52<05:13, 3.77s/it] {'loss': 9.7436, 'grad_norm': 1.9207153509172517e-05, 'learning_rate': 0.4580617347402376, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:52<05:13, 3.77s/it] 84%|████████▍ | 438/520 [27:56<05:05, 3.73s/it] {'loss': 8.4169, 'grad_norm': 1.6091523277972604e-05, 'learning_rate': 0.44733057461188136, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:56<05:05, 3.73s/it] 84%|████████▍ | 439/520 [27:59<05:03, 3.75s/it] {'loss': 9.5184, 'grad_norm': 2.6342254180532307e-05, 'learning_rate': 0.4367180233193621, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:59<05:03, 3.75s/it] 85%|████████▍ | 440/520 [28:03<04:59, 3.74s/it] {'loss': 9.1577, 'grad_norm': 1.0309083459423662e-05, 'learning_rate': 0.4262244932041997, 'epoch': 0.85} + 85%|████████▍ | 440/520 [28:03<04:59, 3.74s/it] 85%|████████▍ | 441/520 [28:07<04:53, 3.71s/it] {'loss': 9.8659, 'grad_norm': 2.098267514689905e-05, 'learning_rate': 0.4158503919834516, 'epoch': 0.85} + 85%|████████▍ | 441/520 [28:07<04:53, 3.71s/it] 85%|████████▌ | 442/520 [28:10<04:48, 3.69s/it] {'loss': 9.1336, 'grad_norm': 1.6554340557330923e-05, 'learning_rate': 0.4055961227338662, 'epoch': 0.85} + 85%|████████▌ | 442/520 [28:10<04:48, 3.69s/it] 85%|████████▌ | 443/520 [28:14<04:43, 3.68s/it] {'loss': 9.1892, 'grad_norm': 1.279762030544131e-05, 'learning_rate': 0.395462083876224, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:14<04:43, 3.68s/it] 85%|████████▌ | 444/520 [28:18<04:40, 3.69s/it] {'loss': 9.0385, 'grad_norm': 1.0080065105430345e-05, 'learning_rate': 0.3854486691598601, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:18<04:40, 3.69s/it] 86%|████████▌ | 445/520 [28:21<04:35, 3.68s/it] {'loss': 
8.7549, 'grad_norm': 1.0923544156975119e-05, 'learning_rate': 0.3755562676473604, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:21<04:35, 3.68s/it] 86%|████████▌ | 446/520 [28:25<04:32, 3.68s/it] {'loss': 10.0413, 'grad_norm': 2.6943603511915372e-05, 'learning_rate': 0.36578526369944675, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:25<04:32, 3.68s/it] 86%|████████▌ | 447/520 [28:29<04:28, 3.68s/it] {'loss': 9.6271, 'grad_norm': 2.7066857190055205e-05, 'learning_rate': 0.35613603696004587, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:29<04:28, 3.68s/it] 86%|████████▌ | 448/520 [28:32<04:25, 3.68s/it] {'loss': 8.9187, 'grad_norm': 8.942417274825975e-06, 'learning_rate': 0.3466089623415334, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:32<04:25, 3.68s/it] 86%|████████▋ | 449/520 [28:36<04:22, 3.70s/it] {'loss': 10.127, 'grad_norm': 3.790203537075594e-05, 'learning_rate': 0.3372044100101723, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:36<04:22, 3.70s/it] 87%|████████▋ | 450/520 [28:40<04:17, 3.67s/it] {'loss': 9.3979, 'grad_norm': 2.5008083876479613e-05, 'learning_rate': 0.3279227453717252, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:40<04:17, 3.67s/it] 87%|████████▋ | 451/520 [28:43<04:13, 3.68s/it] {'loss': 9.559, 'grad_norm': 3.4575598893829636e-05, 'learning_rate': 0.3187643290572617, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:43<04:13, 3.68s/it] 87%|████████▋ | 452/520 [28:47<04:09, 3.67s/it] {'loss': 9.9472, 'grad_norm': 4.2061329768673986e-05, 'learning_rate': 0.309729516909144, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:47<04:09, 3.67s/it] 87%|████████▋ | 453/520 [28:51<04:05, 3.66s/it] {'loss': 10.172, 'grad_norm': 5.453285823571902e-05, 'learning_rate': 0.3008186599671995, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:51<04:05, 3.66s/it] 87%|████████▋ | 454/520 [28:54<04:03, 3.69s/it] {'loss': 8.8879, 'grad_norm': 1.3674203921531897e-05, 'learning_rate': 0.2920321044550833, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:54<04:03, 3.69s/it] 88%|████████▊ | 455/520 [28:58<03:59, 3.69s/it] {'loss': 9.2682, 'grad_norm': 1.7816233599509128e-05, 'learning_rate': 0.2833701917668277, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:58<03:59, 3.69s/it] 88%|████████▊ | 456/520 [29:02<03:54, 3.67s/it] {'loss': 8.8225, 'grad_norm': 1.315332741585056e-05, 'learning_rate': 0.2748332584535729, 'epoch': 0.88} + 88%|████████▊ | 456/520 [29:02<03:54, 3.67s/it] 88%|████████▊ | 457/520 [29:05<03:51, 3.68s/it] {'loss': 10.9128, 'grad_norm': 7.493595270627386e-05, 'learning_rate': 0.2664216362104964, 'epoch': 0.88} + 88%|████████▊ | 457/520 [29:05<03:51, 3.68s/it] 88%|████████▊ | 458/520 [29:09<03:47, 3.67s/it] {'loss': 9.6363, 'grad_norm': 3.366117873058892e-05, 'learning_rate': 0.25813565186391974, 'epoch': 0.88} + 88%|████████▊ | 458/520 [29:09<03:47, 3.67s/it] 88%|████████▊ | 459/520 [29:13<03:44, 3.69s/it] {'loss': 9.3523, 'grad_norm': 2.0876176121914424e-05, 'learning_rate': 0.24997562735861256, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:13<03:44, 3.69s/it] 88%|████████▊ | 460/520 [29:17<03:41, 3.68s/it] {'loss': 8.8057, 'grad_norm': 2.0004640209284925e-05, 'learning_rate': 0.24194187974528553, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:17<03:41, 3.68s/it] 89%|████████▊ | 461/520 [29:20<03:37, 3.68s/it] {'loss': 10.8476, 'grad_norm': 4.336901231507312e-05, 'learning_rate': 0.23403472116826723, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:20<03:37, 3.68s/it] 89%|████████▉ | 462/520 [29:24<03:33, 3.68s/it] {'loss': 10.532, 'grad_norm': 2.0166605951192336e-05, 'learning_rate': 0.22625445885338102, 
'epoch': 0.89} + 89%|████████▉ | 462/520 [29:24<03:33, 3.68s/it] 89%|████████▉ | 463/520 [29:28<03:30, 3.69s/it] {'loss': 9.0, 'grad_norm': 1.6904011174795826e-05, 'learning_rate': 0.21860139509600318, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:28<03:30, 3.69s/it] 89%|████████▉ | 464/520 [29:31<03:26, 3.69s/it] {'loss': 9.7111, 'grad_norm': 1.712674462780285e-05, 'learning_rate': 0.2110758272493209, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:31<03:26, 3.69s/it] 89%|████████▉ | 465/520 [29:35<03:22, 3.69s/it] {'loss': 10.0715, 'grad_norm': 2.4273040959243978e-05, 'learning_rate': 0.20367804771277787, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:35<03:22, 3.69s/it] 90%|████████▉ | 466/520 [29:39<03:18, 3.68s/it] {'loss': 9.1055, 'grad_norm': 1.2458914888969466e-05, 'learning_rate': 0.1964083439207135, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:39<03:18, 3.68s/it] 90%|████████▉ | 467/520 [29:42<03:15, 3.69s/it] {'loss': 10.1187, 'grad_norm': 1.8580067735535386e-05, 'learning_rate': 0.18926699833119393, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:42<03:15, 3.69s/it] 90%|█████████ | 468/520 [29:46<03:11, 3.68s/it] {'loss': 9.5622, 'grad_norm': 1.6516926652502716e-05, 'learning_rate': 0.18225428841503905, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:46<03:11, 3.68s/it] 90%|█████████ | 469/520 [29:50<03:07, 3.68s/it] {'loss': 9.6885, 'grad_norm': 1.498190003943768e-05, 'learning_rate': 0.17537048664503901, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:50<03:07, 3.68s/it] 90%|█████████ | 470/520 [29:53<03:04, 3.69s/it] {'loss': 9.1072, 'grad_norm': 7.77225941176314e-06, 'learning_rate': 0.16861586048537175, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:53<03:04, 3.69s/it] 91%|█████████ | 471/520 [29:57<03:00, 3.69s/it] {'loss': 9.8815, 'grad_norm': 2.291138495306141e-05, 'learning_rate': 0.16199067238120612, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:57<03:00, 3.69s/it] 91%|█████████ | 472/520 [30:01<02:56, 3.68s/it] {'loss': 9.0572, 'grad_norm': 1.0347435888250173e-05, 'learning_rate': 0.15549517974850724, 'epoch': 0.91} + 91%|█████████ | 472/520 [30:01<02:56, 3.68s/it] 91%|█████████ | 473/520 [30:04<02:53, 3.68s/it] {'loss': 9.1505, 'grad_norm': 8.935310987688523e-06, 'learning_rate': 0.14912963496403675, 'epoch': 0.91} + 91%|█████████ | 473/520 [30:04<02:53, 3.68s/it] 91%|█████████ | 474/520 [30:08<02:48, 3.66s/it] {'loss': 10.4614, 'grad_norm': 1.470151448869131e-05, 'learning_rate': 0.14289428535554283, 'epoch': 0.91} + 91%|█████████ | 474/520 [30:08<02:48, 3.66s/it] 91%|█████████▏| 475/520 [30:12<02:45, 3.67s/it] {'loss': 9.3929, 'grad_norm': 2.598460549895659e-05, 'learning_rate': 0.1367893731921518, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:12<02:45, 3.67s/it] 92%|█████████▏| 476/520 [30:15<02:40, 3.66s/it] {'loss': 9.3341, 'grad_norm': 8.916123912961974e-06, 'learning_rate': 0.1308151356749579, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:15<02:40, 3.66s/it] 92%|█████████▏| 477/520 [30:19<02:37, 3.66s/it] {'loss': 8.9955, 'grad_norm': 6.87393238743508e-06, 'learning_rate': 0.12497180492780319, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:19<02:37, 3.66s/it] 92%|█████████▏| 478/520 [30:23<02:34, 3.67s/it] {'loss': 8.8214, 'grad_norm': 6.752640733712949e-06, 'learning_rate': 0.1192596079882613, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:23<02:34, 3.67s/it] 92%|█████████▏| 479/520 [30:26<02:30, 3.66s/it] {'loss': 10.27, 'grad_norm': 1.242148599031914e-05, 'learning_rate': 0.11367876679881361, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:26<02:30, 3.66s/it] 92%|█████████▏| 480/520 
[30:30<02:26, 3.66s/it] {'loss': 10.1991, 'grad_norm': 1.4928500281123087e-05, 'learning_rate': 0.10822949819822753, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:30<02:26, 3.66s/it] 92%|█████████▎| 481/520 [30:34<02:25, 3.72s/it] {'loss': 9.9904, 'grad_norm': 1.252961518539264e-05, 'learning_rate': 0.10291201391313165, 'epoch': 0.93} + 92%|█████████▎| 481/520 [30:34<02:25, 3.72s/it] 93%|█████████▎| 482/520 [30:38<02:21, 3.73s/it] {'loss': 10.5066, 'grad_norm': 1.1915212843524e-05, 'learning_rate': 0.09772652054978925, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:38<02:21, 3.73s/it] 93%|█████████▎| 483/520 [30:41<02:19, 3.77s/it] {'loss': 9.5034, 'grad_norm': 1.1372779298232713e-05, 'learning_rate': 0.09267321958606828, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:41<02:19, 3.77s/it] 93%|█████████▎| 484/520 [30:45<02:16, 3.80s/it] {'loss': 9.5554, 'grad_norm': 1.3910158905073698e-05, 'learning_rate': 0.08775230736361733, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:45<02:16, 3.80s/it] 93%|█████████▎| 485/520 [30:49<02:14, 3.84s/it] {'loss': 8.9738, 'grad_norm': 7.083049263715619e-06, 'learning_rate': 0.08296397508023323, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:49<02:14, 3.84s/it] 93%|█████████▎| 486/520 [30:53<02:11, 3.86s/it] {'loss': 9.4143, 'grad_norm': 1.4217566043262963e-05, 'learning_rate': 0.07830840878243411, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:53<02:11, 3.86s/it] 94%|█████████▎| 487/520 [30:57<02:07, 3.87s/it] {'loss': 8.6292, 'grad_norm': 9.571935343844036e-06, 'learning_rate': 0.07378578935823071, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:57<02:07, 3.87s/it] 94%|█████████▍| 488/520 [31:01<02:01, 3.81s/it] {'loss': 8.9345, 'grad_norm': 9.320136954337238e-06, 'learning_rate': 0.0693962925300966, 'epoch': 0.94} + 94%|█████████▍| 488/520 [31:01<02:01, 3.81s/it] 94%|█████████▍| 489/520 [31:05<01:57, 3.79s/it] {'loss': 9.8856, 'grad_norm': 1.682407949822129e-05, 'learning_rate': 0.06514008884814321, 'epoch': 0.94} + 94%|█████████▍| 489/520 [31:05<01:57, 3.79s/it] 94%|█████████▍| 490/520 [31:08<01:53, 3.77s/it] {'loss': 9.048, 'grad_norm': 6.438671131295832e-06, 'learning_rate': 0.06101734368349104, 'epoch': 0.94} + 94%|█████████▍| 490/520 [31:08<01:53, 3.77s/it] 94%|█████████▍| 491/520 [31:12<01:49, 3.77s/it] {'loss': 9.0023, 'grad_norm': 7.789224166058769e-06, 'learning_rate': 0.05702821722184537, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:12<01:49, 3.77s/it] 95%|█████████▍| 492/520 [31:16<01:45, 3.75s/it] {'loss': 9.2707, 'grad_norm': 9.863471545838477e-06, 'learning_rate': 0.05317286445727193, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:16<01:45, 3.75s/it] 95%|█████████▍| 493/520 [31:19<01:41, 3.76s/it] {'loss': 10.4164, 'grad_norm': 1.4595759426114962e-05, 'learning_rate': 0.0494514351861744, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:20<01:41, 3.76s/it] 95%|█████████▌| 494/520 [31:23<01:37, 3.76s/it] {'loss': 9.3255, 'grad_norm': 9.945233965656428e-06, 'learning_rate': 0.045864074001476185, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:23<01:37, 3.76s/it] 95%|█████████▌| 495/520 [31:27<01:33, 3.75s/it] {'loss': 8.5074, 'grad_norm': 1.2974490890464726e-05, 'learning_rate': 0.0424109202869985, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:27<01:33, 3.75s/it] 95%|█████████▌| 496/520 [31:31<01:29, 3.74s/it] {'loss': 8.7921, 'grad_norm': 9.231780547730407e-06, 'learning_rate': 0.03909210821205017, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:31<01:29, 3.74s/it] 96%|█████████▌| 497/520 [31:34<01:25, 3.72s/it] {'loss': 9.7343, 'grad_norm': 1.9640672028962313e-05, 
'learning_rate': 0.035907766726209045, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:34<01:25, 3.72s/it] 96%|█████████▌| 498/520 [31:38<01:21, 3.70s/it] {'loss': 8.8769, 'grad_norm': 8.466418258258216e-06, 'learning_rate': 0.032858019554315165, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:38<01:21, 3.70s/it] 96%|█████████▌| 499/520 [31:42<01:17, 3.70s/it] {'loss': 10.3597, 'grad_norm': 1.627769233657195e-05, 'learning_rate': 0.029942985191663662, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:42<01:17, 3.70s/it] 96%|█████████▌| 500/520 [31:45<01:13, 3.69s/it] {'loss': 9.7762, 'grad_norm': 1.7303428707666735e-05, 'learning_rate': 0.027162776899397778, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:45<01:13, 3.69s/it] 96%|█████████▋| 501/520 [31:49<01:10, 3.70s/it] {'loss': 10.0647, 'grad_norm': 1.2823843226072679e-05, 'learning_rate': 0.024517502700111327, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:49<01:10, 3.70s/it] 97%|█████████▋| 502/520 [31:53<01:06, 3.70s/it] {'loss': 9.1777, 'grad_norm': 1.0041165294195724e-05, 'learning_rate': 0.022007265373650886, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:53<01:06, 3.70s/it] 97%|█████████▋| 503/520 [31:57<01:02, 3.70s/it] {'loss': 9.9878, 'grad_norm': 1.7680582345096963e-05, 'learning_rate': 0.019632162453120827, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:57<01:02, 3.70s/it] 97%|█████████▋| 504/520 [32:00<00:59, 3.69s/it] {'loss': 9.517, 'grad_norm': 1.8828638188520696e-05, 'learning_rate': 0.017392286221095066, 'epoch': 0.97} + 97%|█████████▋| 504/520 [32:00<00:59, 3.69s/it] 97%|█████████▋| 505/520 [32:04<00:55, 3.70s/it] {'loss': 9.3795, 'grad_norm': 1.0671390402536916e-05, 'learning_rate': 0.015287723706031653, 'epoch': 0.97} + 97%|█████████▋| 505/520 [32:04<00:55, 3.70s/it] 97%|█████████▋| 506/520 [32:08<00:51, 3.69s/it] {'loss': 8.8262, 'grad_norm': 7.633711705311055e-06, 'learning_rate': 0.013318556678890592, 'epoch': 0.97} + 97%|█████████▋| 506/520 [32:08<00:51, 3.69s/it] 98%|█████████▊| 507/520 [32:11<00:48, 3.71s/it] {'loss': 10.8223, 'grad_norm': 1.7918614058117438e-05, 'learning_rate': 0.011484861649957212, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:11<00:48, 3.71s/it] 98%|█████████▊| 508/520 [32:15<00:44, 3.71s/it] {'loss': 9.56, 'grad_norm': 1.4379762036297496e-05, 'learning_rate': 0.009786709865869547, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:15<00:44, 3.71s/it] 98%|█████████▊| 509/520 [32:19<00:40, 3.71s/it] {'loss': 8.9604, 'grad_norm': 1.1187807266221421e-05, 'learning_rate': 0.00822416730684894, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:19<00:40, 3.71s/it] 98%|█████████▊| 510/520 [32:22<00:37, 3.71s/it] {'loss': 9.0219, 'grad_norm': 6.936051179447399e-06, 'learning_rate': 0.006797294684138533, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:22<00:37, 3.71s/it] 98%|█████████▊| 511/520 [32:26<00:33, 3.71s/it] {'loss': 9.1414, 'grad_norm': 7.599516501424861e-06, 'learning_rate': 0.005506147437641884, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:26<00:33, 3.71s/it] 98%|█████████▊| 512/520 [32:30<00:29, 3.71s/it] {'loss': 8.6591, 'grad_norm': 1.0863057395993473e-05, 'learning_rate': 0.0043507757337717945, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:30<00:29, 3.71s/it] 99%|█████████▊| 513/520 [32:34<00:26, 3.72s/it] {'loss': 9.2268, 'grad_norm': 7.427008112695985e-06, 'learning_rate': 0.003331224463497706, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:34<00:26, 3.72s/it] 99%|█████████▉| 514/520 [32:37<00:22, 3.70s/it] {'loss': 9.2379, 'grad_norm': 9.582931492683216e-06, 'learning_rate': 0.002447533240604871, 'epoch': 0.99} + 
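
The learning_rate column is consistent with `--lr_scheduler_type cosine --warmup_ratio 0.03` over 520 steps, decaying to exactly 0.0 at the final step. Note, though, that the logged values fit a peak LR of 7.0 (matching the `..._7_ablation` run name in the completion banner below) rather than the 2e-1 in this file's launch command. A quick reconstruction of the schedule shape, assuming the usual Hugging Face cosine-with-warmup formula and `ceil(0.03 * 520) = 16` warmup steps:

```python
import math

# Reconstruct the cosine-with-warmup schedule to sanity-check the
# learning_rate column above. PEAK_LR is an assumption inferred from
# the logged values / run name, not from this file's launch command.
PEAK_LR = 7.0
TOTAL_STEPS = 520
WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)  # warmup_ratio 0.03 -> 16

def cosine_lr(step: int) -> float:
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS  # linear warmup
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(376))  # ~1.3177857, matching the log at step 376/520
print(cosine_lr(520))  # 0.0, matching the final step
```
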
99%|█████████▉| 514/520 [32:37<00:22, 3.70s/it] 99%|█████████▉| 515/520 [32:41<00:18, 3.70s/it] {'loss': 9.6346, 'grad_norm': 1.5532052560160112e-05, 'learning_rate': 0.0016997364001532511, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:41<00:18, 3.70s/it] 99%|█████████▉| 516/520 [32:45<00:14, 3.69s/it] {'loss': 9.1955, 'grad_norm': 9.854970596885284e-06, 'learning_rate': 0.0010878629971431408, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:45<00:14, 3.69s/it] 99%|█████████▉| 517/520 [32:48<00:10, 3.66s/it] {'loss': 10.2461, 'grad_norm': 1.1733687860224014e-05, 'learning_rate': 0.000611936805387514, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:48<00:10, 3.66s/it] 100%|█████████▉| 518/520 [32:52<00:07, 3.66s/it] {'loss': 9.155, 'grad_norm': 9.21973005881888e-06, 'learning_rate': 0.00027197631658798516, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:52<00:07, 3.66s/it] 100%|█████████▉| 519/520 [32:56<00:03, 3.65s/it] {'loss': 10.0116, 'grad_norm': 1.4963002809288994e-05, 'learning_rate': 6.799473961632829e-05, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:56<00:03, 3.65s/it] 100%|██████████| 520/520 [33:00<00:00, 3.93s/it] {'loss': 10.3176, 'grad_norm': 2.304409942075027e-05, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [33:00<00:00, 3.93s/it] {'train_runtime': 1980.628, 'train_samples_per_second': 33.59, 'train_steps_per_second': 0.263, 'train_loss': 9.399066834495617, 'epoch': 1.0} + 100%|██████████| 520/520 [33:00<00:00, 3.93s/it] 100%|██████████| 520/520 [33:00<00:00, 3.81s/it] +[2025-10-09 06:58:09,615] [INFO] [launch.py:348:main] Process 847292 exits successfully. +[2025-10-09 06:58:09,615] [INFO] [launch.py:348:main] Process 847293 exits successfully. +[2025-10-09 06:58:09,616] [INFO] [launch.py:348:main] Process 847288 exits successfully. +[2025-10-09 06:58:09,616] [INFO] [launch.py:348:main] Process 847290 exits successfully. +[2025-10-09 06:58:09,617] [INFO] [launch.py:348:main] Process 847291 exits successfully. +[2025-10-09 06:58:10,618] [INFO] [launch.py:348:main] Process 847287 exits successfully. +[2025-10-09 06:58:10,619] [INFO] [launch.py:348:main] Process 847289 exits successfully. +[2025-10-09 06:58:13,622] [INFO] [launch.py:348:main] Process 847286 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7_connector-3.0_0.5_7_ablation_20251009_062342.log +Timestamp: 2025-10-09 06:58:16 +===================================== diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation_20251009_094325.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation_20251009_094325.log new file mode 100644 index 0000000000000000000000000000000000000000..6080d1ea3d563d6770516ac0885127473de26871 --- /dev/null +++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation_20251009_094325.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation_20251009_094325.log +Timestamp: 2025-10-09 09:43:25 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. 
Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +[2025-10-09 09:43:28,523] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:31,417] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 09:43:31,419] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 7e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 7e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
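
The `--world_info` blob passed to `deepspeed.launcher.launch` in the command above is base64-encoded JSON listing hosts and local ranks; decoding it shows the eight ranks the launcher spawns next:

```python
import base64
import json

# The launcher's --world_info argument, copied from the command above.
blob = "eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119"
world_info = json.loads(base64.b64decode(blob))
print(world_info)  # {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} -> 8 local ranks
```
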
+ import pynvml # type: ignore[import] +[2025-10-09 09:43:34,046] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:35,079] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 09:43:35,079] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 09:43:35,079] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 09:43:35,080] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 09:43:35,080] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 09:43:35,080] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 09:43:35,080] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 09:43:35,082] [INFO] [launch.py:253:main] process 1130310 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 09:43:35,084] [INFO] [launch.py:253:main] process 1130311 spawned with
command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 09:43:35,086] [INFO] [launch.py:253:main] process 1130312 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 09:43:35,088] [INFO] [launch.py:253:main] process 1130313 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', 
'--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 09:43:35,090] [INFO] [launch.py:253:main] process 1130314 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 09:43:35,092] [INFO] [launch.py:253:main] process 1130315 spawned with command: 
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 09:43:35,094] [INFO] [launch.py:253:main] process 1130316 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', 
'--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 09:43:35,096] [INFO] [launch.py:253:main] process 1130317 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '7e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', 
'--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '7e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
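
With the eight ranks spawned above, per_device_train_batch_size 4 and gradient_accumulation_steps 4 give an effective batch of 8 × 4 × 4 = 128 samples per optimizer step, which together with `--train_data_ratio 0.1` explains the 520-step epochs in these logs. A back-of-envelope check; the 665,298-conversation size of llava_v1_5_mix665k is an assumption from the dataset name, not stated in this log, and the throughput figures are compared against the earlier run's summary block:

```python
import math

# Back-of-envelope consistency check for the runs above.
# Assumption: llava_v1_5_mix665k has 665,298 conversations.
DATASET_SIZE = 665_298
TRAIN_RATIO = 0.1
WORLD_SIZE = 8       # ranks spawned by the launcher
PER_DEVICE_BS = 4
GRAD_ACCUM = 4

samples = int(DATASET_SIZE * TRAIN_RATIO)                   # 66,529
effective_batch = WORLD_SIZE * PER_DEVICE_BS * GRAD_ACCUM   # 128
steps_per_epoch = math.ceil(samples / effective_batch)      # 520

runtime = 1980.628  # train_runtime from the earlier run's summary
print(steps_per_epoch)                       # 520, matching the progress bars
print(round(steps_per_epoch / runtime, 3))   # ~0.263 train_steps_per_second
print(round(samples / runtime, 2))           # ~33.59 train_samples_per_second
```
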
+ import pynvml # type: ignore[import] +[2025-10-09 09:43:41,664] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:41,813] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:42,037] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:42,078] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:42,096] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:42,112] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 09:43:42,112] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:42,112] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:42,119] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 09:43:42,224] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 09:43:42,469] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 09:43:42,509] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 09:43:42,533] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 09:43:42,538] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 09:43:42,540] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 09:43:42,540] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-09 09:43:42,542] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
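
Per the config dump above, soft masks with temperature 0.5 are applied to the LLM and connector while the vision tower stays unmasked. The masking module itself is not shown in this log; purely as an illustration of what a temperature-scaled soft mask can look like, with scores initialized at init_mean so training starts near the dense model:

```python
import torch
import torch.nn as nn

# Illustrative sketch only -- the actual TinyLLaVA masking module is not
# shown in this log. It mimics the flags above: a 'soft' mask obtained by
# pushing learnable scores (initialized at init_mean, e.g. 3.0) through a
# temperature-scaled sigmoid, then multiplying the weights elementwise.
class SoftMaskedLinear(nn.Module):
    def __init__(self, linear: nn.Linear, init_mean: float = 3.0,
                 temperature: float = 0.5):
        super().__init__()
        self.linear = linear
        self.temperature = temperature
        self.scores = nn.Parameter(torch.full_like(linear.weight, init_mean))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # sigmoid(3.0 / 0.5) ~= 0.998, so the mask starts near-open.
        mask = torch.sigmoid(self.scores / self.temperature)
        return nn.functional.linear(x, self.linear.weight * mask,
                                    self.linear.bias)
```
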
+  warnings.warn(
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+  return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+[... the "Special tokens have been added in the vocabulary ..." notice, the TypedStorage UserWarning, and the Flash Attention 2.0 warning above are repeated once per rank (8 ranks); verbatim duplicates elided ...]
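The Flash Attention 2.0 warning is likewise a how-to: the model is materialized on CPU first and must be moved to a CUDA device before the fused kernels can run. A minimal sketch, assuming flash-attn is installed and a GPU is available; the model id comes from the config above:

```python
# Minimal sketch of what the Flash Attention 2.0 warning asks for.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "Qwen/Qwen2.5-0.5B",
    torch_dtype=torch.bfloat16,              # FA2 needs fp16/bf16
    attn_implementation="flash_attention_2",
)
model.to("cuda")  # silences the warning once tensors live on the GPU
```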
+ywang29-vrdb-test1-worker-0:1130310:1130310 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:1130310:1130310 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1130310:1130310 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:1130310:1130310 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:1130310:1130310 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:1130310:1130310 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+[... ranks 1-7 (pids 1130311-1130317) print the same bootstrap sequence ...]
+ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO Using network Socket
+[... same on all 8 ranks: no InfiniBand devices found, so NCCL falls back to the eth0 TCP socket transport ...]
+ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO ncclCommInitRank comm 0x55a72bc7cb00 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x65abe10096acbc77 - Init START
+[... ranks 1-7 issue matching "ncclCommInitRank ... nranks 8 ... commId 0x65abe10096acbc77 - Init START" lines ...]
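The transport selections above are driven entirely by environment variables that NCCL reads at init time. A minimal sketch of the relevant knobs; all three are standard NCCL settings, shown here for illustration and effective only if set before the first collective:

```python
# Minimal sketch: the env vars behind the NCCL lines in this log.
import os

os.environ["NCCL_SOCKET_IFNAME"] = "eth"  # matches "set by environment to eth"
os.environ["NCCL_DEBUG"] = "INFO"         # produces the INFO lines seen here
# os.environ["NCCL_IB_DISABLE"] = "1"     # explicit IB opt-out; moot here,
#                                         # since "NET/IB : No device found."
```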
+ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000
+[... each rank pins CPU affinity to its GPU's NUMA node: mask ff,ffff0000,00ffffff for GPUs 0-3, ffffff00,0000ffff,ff000000 for GPUs 4-7; "NVLS multicast support is not available" is reported on all 8 devices ...]
+ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO comm 0x561efe611320 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0
+[... matching "comm ... nRanks 8 nNodes 1 localRanks 8 ... MNNVL 0" lines follow for the other seven ranks ...]
+ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+[... channels 01/24 through 23/24 list the same ring order 0 1 2 3 4 5 6 7 ...]
+ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 ... [23] 3/-1/-1->2->1
+[... every rank r prints 24 identical tree entries of the form (r+1)/-1/-1->r->(r-1): one chain 0->1->2->3->4->5->6->7 per channel, with -1 marking "no parent" on rank 0 and "no children" on rank 7 ...]
+ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO P2P Chunksize set to 524288
+[... P2P chunksize 524288 is set on every rank ...]
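The Trees entries use a compact child/self/parent notation. A minimal sketch of one way to decode a single entry; reading the pattern as children->self->parent is an interpretation of NCCL's debug output, not an official API:

```python
# Minimal sketch: decode one NCCL "Trees" entry from the log above.
# "c0/c1/c2->self->parent" lists up to three children, the local rank,
# and its parent; -1 means "none".
import re

entry = "3/-1/-1->2->1"  # rank 2, channel 0, taken verbatim from the log
children, me, parent = re.fullmatch(r"(.+)->(-?\d+)->(-?\d+)", entry).groups()
children = [int(c) for c in children.split("/") if int(c) >= 0]
print(f"rank {me}: parent {parent}, children {children}")
# -> rank 2: parent 1, children [3]  (the chain 0->1->...->7 on this node)
```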
+ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+[... each rank r then connects all 24 ring channels r -> r+1, with 7 -> 0 closing the ring, via P2P/CUMEM/read; several hundred near-identical "Channel NN/0 : a[a] -> b[b] via P2P/CUMEM/read" lines elided ...]
+ywang29-vrdb-test1-worker-0:1130311:1131952 [1] NCCL INFO Connected all rings
+[... all 8 ranks report "Connected all rings", then open the reverse connections r -> r-1 for the tree, again 24 channels per rank via P2P/CUMEM/read ...]
+ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130311:1131952 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130311:1131952 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130311:1131952 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130313:1131950 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130313:1131950 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130313:1131950 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130317:1131951 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130317:1131951 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130317:1131951 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130314:1131953 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130314:1131953 [4] NCCL INFO 
threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130314:1131953 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1130314:1131953 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1130314:1131953 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1130316:1131954 [6] NCCL INFO ncclCommInitRank comm 0x55cc6f20fe30 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x65abe10096acbc77 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130317:1131951 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1130314:1131953 [4] NCCL INFO ncclCommInitRank comm 0x5603cb6f7fb0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x65abe10096acbc77 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130317:1131951 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1130317:1131951 [7] NCCL INFO ncclCommInitRank comm 0x55c241f46ea0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x65abe10096acbc77 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1130315:1131955 [5] NCCL INFO ncclCommInitRank comm 0x563f141f8010 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x65abe10096acbc77 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130313:1131950 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1130311:1131952 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1130313:1131950 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1130311:1131952 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1130313:1131950 [3] NCCL INFO ncclCommInitRank comm 0x55daac8cd260 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x65abe10096acbc77 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130311:1131952 [1] NCCL INFO ncclCommInitRank comm 0x558608500c60 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x65abe10096acbc77 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
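The channel, tree, and tuner lines above are ordinary NCCL bootstrap output at `NCCL_DEBUG=INFO` verbosity, and the `libnccl-tuner.so` failure is a benign fallback to the built-in tuner. A minimal sketch (hypothetical helper script, not part of this run) that reproduces this kind of output with PyTorch:

```python
# nccl_debug_demo.py -- hypothetical helper, not part of the training run.
# Launch with: NCCL_DEBUG=INFO torchrun --nproc_per_node=8 nccl_debug_demo.py
import os
import torch
import torch.distributed as dist

def main():
    # NCCL_DEBUG controls the verbosity of the lines in this log; it must
    # be set before the first collective creates the communicator.
    os.environ.setdefault("NCCL_DEBUG", "INFO")
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count())
    # The first collective triggers communicator setup: P2P channel
    # discovery, tree construction, the tuner-plugin probe, and finally
    # the "ncclCommInitRank ... Init COMPLETE" line seen above.
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)
    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```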
+ywang29-vrdb-test1-worker-0:1130310:1131949 [0] NCCL INFO ncclCommInitRank comm 0x55a72bc7cb00 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x65abe10096acbc77 - Init COMPLETE
+ywang29-vrdb-test1-worker-0:1130312:1131956 [2] NCCL INFO ncclCommInitRank comm 0x561efe611320 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x65abe10096acbc77 - Init COMPLETE
+[2025-10-09 09:44:27,163] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: [list condensed: 'model.layers.{0..23}.mlp.{down_proj,gate_proj,up_proj}.scores' and 'model.layers.{0..23}.self_attn.{k_proj,o_proj,q_proj,v_proj}.scores' -- 168 mask-score tensors in total, seven per layer across 24 layers]
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
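This warning is expected for a run that adds fresh mask scores on top of a pretrained checkpoint: the `*.scores` tensors do not exist in the checkpoint, so Transformers initializes them anew. The log does not show the masking implementation itself; below is only a minimal sketch of how per-weight score parameters of this kind are typically attached to a linear layer for soft masking. The names `MaskedLinear`, `init_mean`, and `temperature` are illustrative assumptions, not the repository's actual code.

```python
# Illustrative sketch only -- the real masking code is not shown in this log.
import torch
import torch.nn as nn
import torch.nn.functional as F

class MaskedLinear(nn.Linear):
    """nn.Linear with a learnable per-weight 'scores' tensor.

    A soft mask sigmoid(scores / temperature) scales each weight. Because
    'scores' is a new parameter, loading a vanilla checkpoint reports it
    as "newly initialized" -- exactly the warning above.
    """

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=1.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        self.temperature = temperature
        # One score per weight, initialized to a constant so the initial
        # mask is nearly uniform: sigmoid(1.0 / 0.3) ~ 0.97.
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

# Usage: swap a projection for its masked variant (896 = Qwen2.5-0.5B hidden size).
layer = MaskedLinear(896, 896)
print(sorted(n for n, _ in layer.named_parameters()))  # ['bias', 'scores', 'weight']
```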
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[log condensed: the line above is printed three times, and the "Some weights of Qwen2ForCausalLM were not initialized ..." warning with its "You should probably TRAIN ..." footer is repeated verbatim five more times around it, once per additional rank.]
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 
'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 
'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 
'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 
'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-09 09:49:07,967] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
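The `scores` tensors flagged as newly initialized above are the learnable mask logits that the masktune recipe attaches to every attention and MLP projection; per the trainable-parameter listing further down, only these scores train while the pretrained weights stay fixed. A minimal sketch of the idea behind a soft-masked linear layer, assuming a temperature-scaled sigmoid gate (the class name and gating function below are illustrative assumptions; the repository's `SupermaskLinearSparsity_SoftForward_Normal` may differ in detail):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Illustrative soft supermask: frozen weights gated by trainable scores."""

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=3.0, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        self.weight.requires_grad_(False)        # pretrained weight stays frozen
        if self.bias is not None:
            self.bias.requires_grad_(False)
        self.temperature = temperature           # illustrative default
        # One logit per weight entry; the "Pre-training init ... Mean=3.000000"
        # lines below suggest a constant start, so sigmoid(3.0/0.3) ~ 1 (mask open).
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))

    def forward(self, x):
        mask = torch.sigmoid(self.scores / self.temperature)  # soft mask in (0, 1)
        return F.linear(x, self.weight * mask, self.bias)

layer = SoftMaskedLinear(896, 896)
print(sum(p.numel() for p in layer.parameters() if p.requires_grad))  # 802816
```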
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init
language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000
+Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000
+Pre-training init
connector._connector.0.scores: Mean=3.000005
+Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-09 09:49:20,916 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
+2025-10-09 09:49:20,920 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters
+language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters
+language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters
+language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters
+language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters
+connector._connector.0.scores: 1032192 parameters
+connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Trees [0]
4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 
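For reading the topology dump: a `Channel kk/24 : 0 1 2 3 4 5 6 7` line gives the ring order NCCL chose for one of its 24 channels, and a `Trees [i] c/-1/-1->r->p` entry gives rank `r`'s child `c` and parent `p` in the tree used on channel `i` (`-1` marks an unused slot, so rank 0 with parent `-1` is the root of each chain). A small sketch of the neighbor relations implied by the ring above (illustrative only, not NCCL code):

```python
# Ring "0 1 2 3 4 5 6 7": each rank receives from its predecessor and
# sends to its successor, wrapping around at the ends of the ring.
world_size = 8
for rank in range(world_size):
    send_to = (rank + 1) % world_size
    recv_from = (rank - 1) % world_size
    print(f"rank {rank}: recv from {recv_from}, send to {send_to}")
```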
+ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
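The volume of this output is governed by NCCL's standard debug knobs; lines like the above appear when `NCCL_DEBUG=INFO` is set in the environment. A usage note (standard NCCL environment variables, which must be set before the process group initializes):

```python
import os

# Standard NCCL logging controls; set these before NCCL is initialized.
os.environ["NCCL_DEBUG"] = "INFO"               # "WARN" quiets this chatter
os.environ["NCCL_DEBUG_SUBSYS"] = "INIT,GRAPH"  # restrict to setup/topology messages
```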
+ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL 
INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL 
INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL 
INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL 
INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL 
INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 
512 | 512 +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1130317:1137075 [7] NCCL INFO ncclCommInitRank comm 0x7f0790069fe0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xd36e1f99cd6c7ebd - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130313:1137071 [3] NCCL INFO ncclCommInitRank comm 0x7fbbc006ac10 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xd36e1f99cd6c7ebd - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130315:1137074 [5] NCCL INFO ncclCommInitRank comm 0x7f7d9c06adb0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xd36e1f99cd6c7ebd - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130311:1137070 [1] NCCL INFO ncclCommInitRank comm 0x7f46fc06a900 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xd36e1f99cd6c7ebd - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130314:1137073 [4] NCCL INFO ncclCommInitRank comm 0x7fb04006a880 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xd36e1f99cd6c7ebd - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130310:1137068 [0] NCCL INFO ncclCommInitRank comm 0x7fa57806af30 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xd36e1f99cd6c7ebd - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130312:1137072 [2] NCCL INFO ncclCommInitRank comm 0x7f7d0806aa00 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xd36e1f99cd6c7ebd - Init COMPLETE +ywang29-vrdb-test1-worker-0:1130316:1137069 [6] NCCL INFO ncclCommInitRank comm 0x7f2ee806b500 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xd36e1f99cd6c7ebd - Init COMPLETE + 0%| | 1/520 [00:12<1:44:30, 12.08s/it] {'loss': 2.0453, 'grad_norm': 0.004834278807740133, 'learning_rate': 0.04375, 'epoch': 0.0} + 0%| | 1/520 [00:12<1:44:30, 12.08s/it] 0%| | 2/520 [00:15<1:01:27, 7.12s/it] {'loss': 2.0549, 'grad_norm': 0.005247995790465865, 'learning_rate': 0.0875, 'epoch': 0.0} + 0%| | 2/520 [00:15<1:01:27, 7.12s/it] 1%| | 3/520 [00:19<47:38, 5.53s/it] {'loss': 2.1899, 'grad_norm': 0.006005648471060101, 'learning_rate': 0.13124999999999998, 'epoch': 0.01} + 1%| | 3/520 [00:19<47:38, 5.53s/it] 1%| | 4/520 [00:23<41:10, 4.79s/it] {'loss': 1.6537, 'grad_norm': 0.001740883086316344, 'learning_rate': 0.175, 'epoch': 0.01} + 1%| | 4/520 [00:23<41:10, 4.79s/it] 1%| | 5/520 [00:26<37:31, 4.37s/it] {'loss': 1.6652, 'grad_norm': 0.0009376593892523836, 'learning_rate': 0.21875, 'epoch': 0.01} + 1%| | 5/520 [00:26<37:31, 4.37s/it] 1%| | 6/520 
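The step log that follows records loss, grad_norm, learning_rate, and epoch once per optimizer step. The learning_rate column climbs linearly from 0.04375 to a peak of 0.7 over the first 16 steps (3% of the 520 scheduled steps, rounded up) and then decays along a cosine curve. A minimal sketch, not taken from the training code, that reproduces the logged values under the standard linear-warmup-plus-cosine rule; PEAK_LR is read directly off step 16 of the log:

```python
import math

# Sketch of the schedule implied by the learning_rate column below.
# Assumptions: 520 total steps, warmup_ratio 0.03 rounded up to 16 steps,
# peak rate 0.7 as observed at step 16 of this log.
PEAK_LR = 0.7
TOTAL_STEPS = 520
WARMUP_STEPS = math.ceil(0.03 * TOTAL_STEPS)  # = 16

def lr_at(step: int) -> float:
    """Learning rate applied at optimizer step `step`, 1-indexed as logged."""
    if step <= WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS  # linear warmup
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

# lr_at(1) == 0.04375, lr_at(16) == 0.7, lr_at(17) ~= 0.6999932005,
# lr_at(100) ~= 0.6531088913 -- matching the values logged below.
```

To turn the listing into plottable series, the per-step dicts can be recovered with a short parser. A sketch assuming the log is shaped exactly like the listing below; `parse_log` and the `train.log` path are hypothetical names, not part of the training code:

```python
import ast
import re

# Pull "(step)/520 [...] {'loss': ...}" entries out of a log like the one below.
STEP_RE = re.compile(r"(\d+)/520 \[[^\]]*\]\s*(\{'loss':[^}]*\})")

def parse_log(path: str) -> dict[int, dict]:
    steps: dict[int, dict] = {}
    with open(path, encoding="utf-8", errors="replace") as f:
        for match in STEP_RE.finditer(f.read()):
            # keep the first occurrence if a step is ever repeated
            steps.setdefault(int(match.group(1)), ast.literal_eval(match.group(2)))
    return steps

# Example (path is a placeholder):
# losses = {s: m["loss"] for s, m in parse_log("train.log").items()}
```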
+1/520 [00:12<1:44:30, 12.08s/it] {'loss': 2.0453, 'grad_norm': 0.004834278807740133, 'learning_rate': 0.04375, 'epoch': 0.0}
+2/520 [00:15<1:01:27, 7.12s/it] {'loss': 2.0549, 'grad_norm': 0.005247995790465865, 'learning_rate': 0.0875, 'epoch': 0.0}
+3/520 [00:19<47:38, 5.53s/it] {'loss': 2.1899, 'grad_norm': 0.006005648471060101, 'learning_rate': 0.13124999999999998, 'epoch': 0.01}
+4/520 [00:23<41:10, 4.79s/it] {'loss': 1.6537, 'grad_norm': 0.001740883086316344, 'learning_rate': 0.175, 'epoch': 0.01}
+5/520 [00:26<37:31, 4.37s/it] {'loss': 1.6652, 'grad_norm': 0.0009376593892523836, 'learning_rate': 0.21875, 'epoch': 0.01}
+6/520 [00:30<35:21, 4.13s/it] {'loss': 1.3882, 'grad_norm': 0.0004395221360921007, 'learning_rate': 0.26249999999999996, 'epoch': 0.01}
+7/520 [00:33<33:53, 3.96s/it] {'loss': 1.4341, 'grad_norm': 0.0006265532423888469, 'learning_rate': 0.30624999999999997, 'epoch': 0.01}
+8/520 [00:38<34:42, 4.07s/it] {'loss': 1.4602, 'grad_norm': 0.0006449907307758009, 'learning_rate': 0.35, 'epoch': 0.02}
+9/520 [00:42<34:50, 4.09s/it] {'loss': 1.5256, 'grad_norm': 0.0008317981275994114, 'learning_rate': 0.39375, 'epoch': 0.02}
+10/520 [00:46<33:39, 3.96s/it] {'loss': 1.3581, 'grad_norm': 0.0011693998518132402, 'learning_rate': 0.4375, 'epoch': 0.02}
+11/520 [00:49<33:14, 3.92s/it] {'loss': 1.4423, 'grad_norm': 0.002381177176155957, 'learning_rate': 0.48124999999999996, 'epoch': 0.02}
+12/520 [00:53<32:31, 3.84s/it] {'loss': 1.3587, 'grad_norm': 0.002132315213514819, 'learning_rate': 0.5249999999999999, 'epoch': 0.02}
+[2025-10-09 09:50:24,470] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
+13/520 [00:57<33:49, 4.00s/it] {'loss': 1.4385, 'grad_norm': 0.0030005122747675476, 'learning_rate': 0.56875, 'epoch': 0.03}
+14/520 [01:01<32:58, 3.91s/it] {'loss': 1.5504, 'grad_norm': 0.007316681667543428, 'learning_rate': 0.6124999999999999, 'epoch': 0.03}
+15/520 [01:05<32:30, 3.86s/it] {'loss': 1.5174, 'grad_norm': 0.0036532279699684994, 'learning_rate': 0.65625, 'epoch': 0.03}
+16/520 [01:09<32:03, 3.82s/it] {'loss': 1.4951, 'grad_norm': 0.004037679030595796, 'learning_rate': 0.7, 'epoch': 0.03}
+17/520 [01:12<31:37, 3.77s/it] {'loss': 1.7959, 'grad_norm': 0.008162258315879597, 'learning_rate': 0.6999932005260383, 'epoch': 0.03}
+18/520 [01:16<31:13, 3.73s/it] {'loss': 2.0403, 'grad_norm': 0.015535619519797601, 'learning_rate': 0.6999728023683411, 'epoch': 0.03}
+19/520 [01:20<31:00, 3.71s/it] {'loss': 2.2517, 'grad_norm': 0.029333917581119667, 'learning_rate': 0.6999388063194613, 'epoch': 0.04}
+20/520 [01:23<30:45, 3.69s/it] {'loss': 2.2608, 'grad_norm': 0.02009992855347179, 'learning_rate': 0.6998912137002856, 'epoch': 0.04}
+21/520 [01:27<30:44, 3.70s/it] {'loss': 2.5598, 'grad_norm': 0.04163644748143006, 'learning_rate': 0.6998300263599846, 'epoch': 0.04}
+22/520 [01:31<30:49, 3.71s/it] {'loss': 2.5115, 'grad_norm': 0.022332401574798447, 'learning_rate': 0.6997552466759395, 'epoch': 0.04}
+23/520 [01:34<30:39, 3.70s/it] {'loss': 2.5312, 'grad_norm': 0.026265472546397953, 'learning_rate': 0.6996668775536502, 'epoch': 0.04}
+24/520 [01:38<30:28, 3.69s/it] {'loss': 2.3723, 'grad_norm': 0.01574468971533386, 'learning_rate': 0.6995649224266227, 'epoch': 0.05}
+25/520 [01:42<30:21, 3.68s/it] {'loss': 2.1988, 'grad_norm': 0.019859835410233964, 'learning_rate': 0.6994493852562358, 'epoch': 0.05}
+26/520 [01:45<30:41, 3.73s/it] {'loss': 2.118, 'grad_norm': 0.00835118508647837, 'learning_rate': 0.6993202705315862, 'epoch': 0.05}
+27/520 [01:49<30:56, 3.77s/it] {'loss': 1.8547, 'grad_norm': 0.0039000374090282404, 'learning_rate': 0.6991775832693151, 'epoch': 0.05}
+28/520 [01:53<31:05, 3.79s/it] {'loss': 1.852, 'grad_norm': 0.0045306830216645, 'learning_rate': 0.699021329013413, 'epoch': 0.05}
+29/520 [01:57<31:21, 3.83s/it] {'loss': 1.7933, 'grad_norm': 0.0034074586740331938, 'learning_rate': 0.6988515138350042, 'epoch': 0.06}
+30/520 [02:01<31:26, 3.85s/it] {'loss': 2.3204, 'grad_norm': 0.008828683441886172, 'learning_rate': 0.6986681443321109, 'epoch': 0.06}
+31/520 [02:05<31:19, 3.84s/it] {'loss': 1.7968, 'grad_norm': 0.0032732163602976895, 'learning_rate': 0.6984712276293967, 'epoch': 0.06}
+32/520 [02:09<31:34, 3.88s/it] {'loss': 2.5092, 'grad_norm': 0.00985630916196738, 'learning_rate': 0.6982607713778904, 'epoch': 0.06}
+33/520 [02:13<31:27, 3.88s/it] {'loss': 1.7517, 'grad_norm': 0.003406924191875875, 'learning_rate': 0.6980367837546879, 'epoch': 0.06}
+34/520 [02:16<31:17, 3.86s/it] {'loss': 1.7236, 'grad_norm': 0.002862239534054578, 'learning_rate': 0.6977992734626349, 'epoch': 0.07}
+35/520 [02:20<31:12, 3.86s/it] {'loss': 1.7032, 'grad_norm': 0.0029108614020275435, 'learning_rate': 0.6975482497299887, 'epoch': 0.07}
+36/520 [02:24<31:03, 3.85s/it] {'loss': 1.8388, 'grad_norm': 0.0021122666367240413, 'learning_rate': 0.6972837223100602, 'epoch': 0.07}
+37/520 [02:28<31:02, 3.86s/it] {'loss': 2.1069, 'grad_norm': 0.005462797800880719, 'learning_rate': 0.6970057014808336, 'epoch': 0.07}
+38/520 [02:32<30:56, 3.85s/it] {'loss': 1.8887, 'grad_norm': 0.0021074034813285875, 'learning_rate': 0.6967141980445684, 'epoch': 0.07}
+39/520 [02:36<30:53, 3.85s/it] {'loss': 1.6914, 'grad_norm': 0.0028004705321816627, 'learning_rate': 0.6964092233273791, 'epoch': 0.07}
+40/520 [02:40<30:57, 3.87s/it] {'loss': 1.7233, 'grad_norm': 0.0016600406963376139, 'learning_rate': 0.6960907891787949, 'epoch': 0.08}
+41/520 [02:43<30:42, 3.85s/it] {'loss': 1.6604, 'grad_norm': 0.0020386509248121734, 'learning_rate': 0.6957589079713, 'epoch': 0.08}
+42/520 [02:47<30:14, 3.80s/it] {'loss': 1.7055, 'grad_norm': 0.0021602084096998954, 'learning_rate': 0.6954135925998524, 'epoch': 0.08}
+43/520 [02:51<30:00, 3.77s/it] {'loss': 1.8469, 'grad_norm': 0.002381737125240196, 'learning_rate': 0.6950548564813824, 'epoch': 0.08}
+44/520 [02:54<29:40, 3.74s/it] {'loss': 2.0038, 'grad_norm': 0.0029082260638710732, 'learning_rate': 0.6946827135542728, 'epoch': 0.08}
+45/520 [02:58<29:30, 3.73s/it] {'loss': 1.6743, 'grad_norm': 0.0015699750369012125, 'learning_rate': 0.6942971782778155, 'epoch': 0.09}
+46/520 [03:02<29:10, 3.69s/it] {'loss': 2.0408, 'grad_norm': 0.0022477376276371533, 'learning_rate': 0.6938982656316509, 'epoch': 0.09}
+47/520 [03:05<28:56, 3.67s/it] {'loss': 1.6721, 'grad_norm': 0.00143568295294714, 'learning_rate': 0.6934859911151857, 'epoch': 0.09}
+48/520 [03:09<28:53, 3.67s/it] {'loss': 1.6437, 'grad_norm': 0.001512666592864102, 'learning_rate': 0.6930603707469903, 'epoch': 0.09}
+49/520 [03:13<28:47, 3.67s/it] {'loss': 1.6536, 'grad_norm': 0.0013677713968727851, 'learning_rate': 0.6926214210641769, 'epoch': 0.09}
+50/520 [03:16<28:33, 3.65s/it] {'loss': 1.6439, 'grad_norm': 0.0011963622641485772, 'learning_rate': 0.6921691591217566, 'epoch': 0.1}
+51/520 [03:20<28:36, 3.66s/it] {'loss': 1.5496, 'grad_norm': 0.0012677694586159108, 'learning_rate': 0.6917036024919766, 'epoch': 0.1}
+52/520 [03:24<28:23, 3.64s/it] {'loss': 1.6946, 'grad_norm': 0.0013282365917306105, 'learning_rate': 0.6912247692636382, 'epoch': 0.1}
+53/520 [03:27<28:20, 3.64s/it] {'loss': 1.6797, 'grad_norm': 0.0012431990726565478, 'learning_rate': 0.690732678041393, 'epoch': 0.1}
+54/520 [03:31<28:15, 3.64s/it] {'loss': 1.5677, 'grad_norm': 0.001082099210238527, 'learning_rate': 0.690227347945021, 'epoch': 0.1}
+55/520 [03:35<28:10, 3.64s/it] {'loss': 1.5507, 'grad_norm': 0.002995363619544303, 'learning_rate': 0.6897087986086868, 'epoch': 0.11}
+56/520 [03:38<28:07, 3.64s/it] {'loss': 1.6921, 'grad_norm': 0.0011440080379870833, 'learning_rate': 0.6891770501801773, 'epoch': 0.11}
+57/520 [03:42<28:05, 3.64s/it] {'loss': 1.5335, 'grad_norm': 0.001113359679411517, 'learning_rate': 0.6886321233201187, 'epoch': 0.11}
+58/520 [03:45<27:55, 3.63s/it] {'loss': 1.6898, 'grad_norm': 0.0009495496373282486, 'learning_rate': 0.6880740392011738, 'epoch': 0.11}
+59/520 [03:49<28:06, 3.66s/it] {'loss': 1.7028, 'grad_norm': 0.0021049243718306867, 'learning_rate': 0.6875028195072197, 'epoch': 0.11}
+60/520 [03:53<28:03, 3.66s/it] {'loss': 1.5952, 'grad_norm': 0.0010202728033177806, 'learning_rate': 0.6869184864325041, 'epoch': 0.12}
+61/520 [03:56<27:55, 3.65s/it] {'loss': 1.8429, 'grad_norm': 0.001165701779358835, 'learning_rate': 0.6863210626807849, 'epoch': 0.12}
+62/520 [04:00<27:47, 3.64s/it] {'loss': 1.5659, 'grad_norm': 0.0010693122178383255, 'learning_rate': 0.6857105714644457, 'epoch': 0.12}
+63/520 [04:04<27:41, 3.64s/it] {'loss': 1.5928, 'grad_norm': 0.0009549712386575197, 'learning_rate': 0.6850870365035963, 'epoch': 0.12}
+64/520 [04:07<27:40, 3.64s/it] {'loss': 1.5824, 'grad_norm': 0.0010643989950137077, 'learning_rate': 0.6844504820251492, 'epoch': 0.12}
+65/520 [04:11<27:53, 3.68s/it] {'loss': 1.6172, 'grad_norm': 0.001164956064595674, 'learning_rate': 0.6838009327618794, 'epoch': 0.12}
+66/520 [04:15<27:46, 3.67s/it] {'loss': 1.5575, 'grad_norm': 0.0008414152089015475, 'learning_rate': 0.6831384139514628, 'epoch': 0.13}
+67/520 [04:18<27:44, 3.67s/it] {'loss': 1.4202, 'grad_norm': 0.0008657692298919422, 'learning_rate': 0.6824629513354961, 'epoch': 0.13}
+68/520 [04:22<27:40, 3.67s/it] {'loss': 1.4788, 'grad_norm': 0.0009364581716641025, 'learning_rate': 0.681774571158496, 'epoch': 0.13}
+69/520 [04:26<27:33, 3.67s/it] {'loss': 1.454, 'grad_norm': 0.0008855785660711081, 'learning_rate': 0.6810733001668805, 'epoch': 0.13}
+70/520 [04:29<27:33, 3.67s/it] {'loss': 1.5347, 'grad_norm': 0.0010506734651085931, 'learning_rate': 0.6803591656079286, 'epoch': 0.13}
+71/520 [04:33<27:30, 3.68s/it] {'loss': 1.4196, 'grad_norm': 0.0007585227545743746, 'learning_rate': 0.6796321952287222, 'epoch': 0.14}
+72/520 [04:37<27:26, 3.67s/it] {'loss': 1.5736, 'grad_norm': 0.0009255792243577866, 'learning_rate': 0.6788924172750679, 'epoch': 0.14}
+73/520 [04:40<27:24, 3.68s/it] {'loss': 1.3967, 'grad_norm': 0.0008571902194713269, 'learning_rate': 0.6781398604903998, 'epoch': 0.14}
+74/520 [04:44<27:19, 3.68s/it] {'loss': 1.5168, 'grad_norm': 0.0007998618164513924, 'learning_rate': 0.6773745541146619, 'epoch': 0.14}
+75/520 [04:48<27:15, 3.67s/it] {'loss': 1.4192, 'grad_norm': 0.0006799942039133047, 'learning_rate': 0.6765965278831731, 'epoch': 0.14}
+76/520 [04:52<27:16, 3.68s/it] {'loss': 1.7875, 'grad_norm': 0.0007660711791662314, 'learning_rate': 0.6758058120254714, 'epoch': 0.15}
+77/520 [04:55<27:07, 3.67s/it] {'loss': 1.3377, 'grad_norm': 0.0009737188033163908, 'learning_rate': 0.6750024372641387, 'epoch': 0.15}
+78/520 [04:59<27:00, 3.67s/it] {'loss': 1.478, 'grad_norm': 0.0007080063165064295, 'learning_rate': 0.674186434813608, 'epoch': 0.15}
+79/520 [05:03<27:05, 3.69s/it] {'loss': 1.4495, 'grad_norm': 0.0006610751812454766, 'learning_rate': 0.6733578363789503, 'epoch': 0.15}
+80/520 [05:06<26:58, 3.68s/it] {'loss': 1.7941, 'grad_norm': 0.0008870916830016612, 'learning_rate': 0.6725166741546427, 'epoch': 0.15}
+81/520 [05:10<26:48, 3.66s/it] {'loss': 1.6114, 'grad_norm': 0.0008733136983187711, 'learning_rate': 0.6716629808233171, 'epoch': 0.16}
+82/520 [05:13<26:40, 3.65s/it] {'loss': 1.5188, 'grad_norm': 0.0008121397717508324, 'learning_rate': 0.6707967895544916, 'epoch': 0.16}
+83/520 [05:17<26:35, 3.65s/it] {'loss': 1.5541, 'grad_norm': 0.0007099632749343796, 'learning_rate': 0.66991813400328, 'epoch': 0.16}
+84/520 [05:21<26:33, 3.66s/it] {'loss': 1.5473, 'grad_norm': 0.0007561282000090176, 'learning_rate': 0.6690270483090855, 'epoch': 0.16}
+85/520 [05:24<26:23, 3.64s/it] {'loss': 1.5489, 'grad_norm': 0.0006557923725666118, 'learning_rate': 0.6681235670942738, 'epoch': 0.16}
+86/520 [05:28<26:48, 3.71s/it] {'loss': 1.5838, 'grad_norm': 0.0007600734197451958, 'learning_rate': 0.6672077254628275, 'epoch': 0.17}
+87/520 [05:32<27:00, 3.74s/it] {'loss': 1.7312, 'grad_norm': 0.0008834191428762701, 'learning_rate': 0.6662795589989828, 'epoch': 0.17}
+88/520 [05:36<27:14, 3.78s/it] {'loss': 1.8296, 'grad_norm': 0.000820929326026817, 'learning_rate': 0.6653391037658466, 'epoch': 0.17}
+89/520 [05:40<27:20, 3.81s/it] {'loss': 1.5193, 'grad_norm': 0.0006884300846748254, 'learning_rate': 0.6643863963039954, 'epoch': 0.17}
+90/520 [05:44<27:26, 3.83s/it] {'loss': 1.4315, 'grad_norm': 0.0008200637579466148, 'learning_rate': 0.6634214736300553, 'epoch': 0.17}
+91/520 [05:48<27:27, 3.84s/it] {'loss': 1.5233, 'grad_norm': 0.0006338742272668764, 'learning_rate': 0.662444373235264, 'epoch': 0.17}
+92/520 [05:51<27:24, 3.84s/it] {'loss': 1.468, 'grad_norm': 0.000722487085993732, 'learning_rate': 0.661455133084014, 'epoch': 0.18}
+93/520 [05:55<27:25, 3.85s/it] {'loss': 1.4472, 'grad_norm': 0.0006256844701385743, 'learning_rate': 0.6604537916123775, 'epoch': 0.18}
+94/520 [05:59<27:28, 3.87s/it] {'loss': 1.5685, 'grad_norm': 0.000676306873050005, 'learning_rate': 0.6594403877266134, 'epoch': 0.18}
+95/520 [06:03<27:27, 3.88s/it] {'loss': 1.4299, 'grad_norm': 0.0007205923854520879, 'learning_rate': 0.6584149608016547, 'epoch': 0.18}
+96/520 [06:07<27:33, 3.90s/it] {'loss': 1.4454, 'grad_norm': 0.0005715154951593509, 'learning_rate': 0.6573775506795799, 'epoch': 0.18}
+97/520 [06:11<27:29, 3.90s/it] {'loss': 1.411, 'grad_norm': 0.000682935019699513, 'learning_rate': 0.6563281976680638, 'epoch': 0.19}
+98/520 [06:15<27:22, 3.89s/it] {'loss': 1.4242, 'grad_norm': 0.0006235357071969237, 'learning_rate': 0.6552669425388119, 'epoch': 0.19}
+99/520 [06:19<27:13, 3.88s/it] {'loss': 1.4442, 'grad_norm': 0.0007815911114673015, 'learning_rate': 0.6541938265259762, 'epoch': 0.19}
+100/520 [06:23<27:04, 3.87s/it] {'loss': 1.5938, 'grad_norm': 0.0006532869647933356, 'learning_rate': 0.6531088913245535, 'epoch': 0.19}
+101/520 [06:26<26:55, 3.86s/it] {'loss': 1.4471, 'grad_norm': 0.0005884920600000819, 'learning_rate': 0.6520121790887645, 'epoch': 0.19}
+102/520 [06:30<26:54, 3.86s/it] {'loss': 1.4423, 'grad_norm': 0.0005825848507433886, 'learning_rate': 0.6509037324304165, 'epoch': 0.2}
+103/520 [06:34<26:24, 3.80s/it] {'loss': 1.391, 'grad_norm': 0.0005141277313191252, 'learning_rate': 0.649783594417248, 'epoch': 0.2}
+104/520 [06:38<26:10, 3.77s/it] {'loss': 1.4579, 'grad_norm': 0.0005947030067565183, 'learning_rate': 0.6486518085712544, 'epoch': 0.2}
+105/520 [06:41<25:50, 3.74s/it] {'loss': 1.4615, 'grad_norm': 0.0006053810946016042, 'learning_rate': 0.6475084188669982, 'epoch': 0.2}
+106/520 [06:45<25:35, 3.71s/it] {'loss': 1.6138, 'grad_norm': 0.0006066899654820062, 'learning_rate': 0.6463534697298994, 'epoch': 0.2}
+107/520 [06:49<25:25, 3.69s/it] {'loss': 1.5873, 'grad_norm': 0.0006134812195798412, 'learning_rate': 0.64518700603451, 'epoch': 0.21}
+108/520 [06:52<25:12, 3.67s/it] {'loss': 1.3985, 'grad_norm': 0.0005566370583243479, 'learning_rate': 0.64400907310277, 'epoch': 0.21}
+109/520 [06:56<25:03, 3.66s/it] {'loss': 1.5809, 'grad_norm': 0.0005085042142920145, 'learning_rate': 0.6428197167022469, 'epoch': 0.21}
+110/520 [06:59<24:56, 3.65s/it] {'loss': 1.5856, 'grad_norm': 0.0006079751696949488, 'learning_rate': 0.641618983044357, 'epoch': 0.21}
+111/520 [07:03<24:46, 3.63s/it] {'loss': 1.5846, 'grad_norm': 0.0006672739002305168, 'learning_rate': 0.6404069187825706, 'epoch': 0.21}
+112/520 [07:07<24:41, 3.63s/it] {'loss': 1.4695, 'grad_norm': 0.0005091887683938199, 'learning_rate': 0.6391835710105982, 'epoch': 0.22}
+113/520 [07:10<24:41, 3.64s/it] {'loss': 1.3347, 'grad_norm': 0.0006103531872582199, 'learning_rate': 0.6379489872605616, 'epoch': 0.22}
+114/520 [07:14<24:32, 3.63s/it] {'loss': 1.461, 'grad_norm': 0.0005645509387916913, 'learning_rate': 0.6367032155011472, 'epoch': 0.22}
+115/520 [07:17<24:24, 3.62s/it] {'loss': 1.5718, 'grad_norm': 0.0005314004926530573, 'learning_rate': 0.635446304135741, 'epoch': 0.22}
+116/520 [07:21<24:24, 3.62s/it] {'loss': 1.5581, 'grad_norm': 0.0005793567165041007, 'learning_rate': 0.6341783020005498, 'epoch': 0.22}
+117/520 [07:25<24:22, 3.63s/it] {'loss': 1.5502, 'grad_norm': 0.0005421919897229025, 'learning_rate': 0.6328992583627018, 'epoch': 0.23}
+118/520 [07:28<24:26, 3.65s/it] {'loss': 1.4252, 'grad_norm': 0.0005367007204164092, 'learning_rate': 0.6316092229183339, 'epoch': 0.23}
+119/520 [07:32<24:17, 3.64s/it] {'loss': 1.3773, 'grad_norm': 0.0006405777166371755, 'learning_rate': 0.6303082457906598, 'epoch': 0.23}
+120/520 [07:36<24:08, 3.62s/it] {'loss': 1.4144, 'grad_norm': 0.0006395087213662278, 'learning_rate': 0.6289963775280228, 'epoch': 0.23}
+121/520 [07:39<24:01, 3.61s/it] {'loss': 1.4799, 'grad_norm': 0.0005379114698415874, 'learning_rate': 0.6276736691019322, 'epoch': 0.23}
+122/520 [07:43<23:59, 3.62s/it] {'loss': 1.3424, 'grad_norm': 0.0005100757878789201, 'learning_rate': 0.6263401719050824, 'epoch': 0.23}
+123/520 [07:46<23:51, 3.61s/it] {'loss': 1.6561, 'grad_norm': 0.0007925659646345894, 'learning_rate': 0.6249959377493558, 'epoch': 0.24}
+124/520 [07:50<24:01, 3.64s/it] {'loss': 1.4345, 'grad_norm': 0.00052256615363317, 'learning_rate': 0.6236410188638104, 'epoch': 0.24}
+125/520 [07:54<23:54, 3.63s/it] {'loss': 1.4045, 'grad_norm': 0.0005816761449121094, 'learning_rate': 0.6222754678926502, 'epoch': 0.24}
+126/520 [07:58<25:23, 3.87s/it] {'loss': 1.549, 'grad_norm': 0.0006191202543883859, 'learning_rate': 0.6208993378931796, 'epoch': 0.24}
+127/520 [08:02<25:03, 3.83s/it] {'loss': 1.3809, 'grad_norm': 0.0005088481170702607, 'learning_rate': 0.619512682333742, 'epoch': 0.24}
+128/520 [08:06<24:41, 3.78s/it] {'loss': 1.4541, 'grad_norm': 0.0005645517745448947, 'learning_rate': 0.6181155550916422, 'epoch': 0.25}
+129/520 [08:09<24:27, 3.75s/it] {'loss': 1.3811, 'grad_norm': 0.0004293340792377788, 'learning_rate': 0.6167080104510537, 'epoch': 0.25}
+130/520 [08:13<24:13, 3.73s/it] {'loss': 1.4426, 'grad_norm': 0.00048654348096204936, 'learning_rate': 0.6152901031009085, 'epoch': 0.25}
+131/520 [08:17<24:04, 3.71s/it] {'loss': 1.5281, 'grad_norm': 0.0004760070199969545, 'learning_rate': 0.6138618881327729, 'epoch': 0.25}
+132/520 [08:20<23:56, 3.70s/it] {'loss': 1.492, 'grad_norm': 0.0005434196336986363, 'learning_rate': 0.612423421038707, 'epoch': 0.25}
+133/520 [08:24<23:42, 3.68s/it] {'loss': 1.3908, 'grad_norm': 0.0006045345730664837, 'learning_rate': 0.6109747577091079, 'epoch': 0.26}
+134/520 [08:28<23:37, 3.67s/it] {'loss': 1.485, 'grad_norm': 0.0004743394563724202, 'learning_rate': 0.6095159544305393, 'epoch': 0.26}
+135/520 [08:31<23:29, 3.66s/it] {'loss': 1.5488, 'grad_norm': 0.0005693733076255206, 'learning_rate': 0.6080470678835433, 'epoch': 0.26}
+136/520 [08:35<23:21, 3.65s/it] {'loss': 1.4811, 'grad_norm': 0.000600715706530788, 'learning_rate': 0.6065681551404392, 'epoch': 0.26}
+137/520 [08:38<23:10, 3.63s/it] {'loss': 1.3903, 'grad_norm': 0.0005898639473731857, 'learning_rate':
0.6050792736631049, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:38<23:10, 3.63s/it] 27%|██▋ | 138/520 [08:42<23:08, 3.63s/it] {'loss': 1.4091, 'grad_norm': 0.00046660852581139836, 'learning_rate': 0.6035804813007454, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:42<23:08, 3.63s/it] 27%|██▋ | 139/520 [08:46<23:10, 3.65s/it] {'loss': 1.4183, 'grad_norm': 0.0004531536375253908, 'learning_rate': 0.6020718362876443, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:46<23:10, 3.65s/it] 27%|██▋ | 140/520 [08:49<23:13, 3.67s/it] {'loss': 1.5681, 'grad_norm': 0.0004360362473839658, 'learning_rate': 0.6005533972409014, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:49<23:13, 3.67s/it] 27%|██▋ | 141/520 [08:53<23:05, 3.66s/it] {'loss': 1.5151, 'grad_norm': 0.0004224420490119624, 'learning_rate': 0.5990252231581555, 'epoch': 0.27} + 27%|██▋ | 141/520 [08:53<23:05, 3.66s/it] 27%|██▋ | 142/520 [08:57<23:09, 3.68s/it] {'loss': 1.6239, 'grad_norm': 0.0006609646889064069, 'learning_rate': 0.5974873734152916, 'epoch': 0.27} + 27%|██▋ | 142/520 [08:57<23:09, 3.68s/it] 28%|██▊ | 143/520 [09:01<23:10, 3.69s/it] {'loss': 1.4438, 'grad_norm': 0.0005063613075768972, 'learning_rate': 0.5959399077641342, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:01<23:10, 3.69s/it] 28%|██▊ | 144/520 [09:04<23:04, 3.68s/it] {'loss': 1.3903, 'grad_norm': 0.0005792218391145593, 'learning_rate': 0.5943828863301254, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:04<23:04, 3.68s/it] 28%|██▊ | 145/520 [09:08<22:56, 3.67s/it] {'loss': 1.3087, 'grad_norm': 0.0004920573314074012, 'learning_rate': 0.5928163696099895, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:08<22:56, 3.67s/it] 28%|██▊ | 146/520 [09:12<22:58, 3.69s/it] {'loss': 1.6545, 'grad_norm': 0.00048538066305047714, 'learning_rate': 0.5912404184693815, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:12<22:58, 3.69s/it] 28%|██▊ | 147/520 [09:15<22:50, 3.67s/it] {'loss': 1.3581, 'grad_norm': 0.0005326062019357025, 'learning_rate': 0.5896550941405226, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:15<22:50, 3.67s/it] 28%|██▊ | 148/520 [09:19<22:47, 3.67s/it] {'loss': 1.4, 'grad_norm': 0.0005549866031893333, 'learning_rate': 0.5880604582198217, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:19<22:47, 3.67s/it] 29%|██▊ | 149/520 [09:23<22:41, 3.67s/it] {'loss': 1.3469, 'grad_norm': 0.00044835270774194074, 'learning_rate': 0.5864565726654811, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:23<22:41, 3.67s/it] 29%|██▉ | 150/520 [09:26<22:35, 3.66s/it] {'loss': 1.5736, 'grad_norm': 0.0006132272388524652, 'learning_rate': 0.5848434997950894, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:26<22:35, 3.66s/it] 29%|██▉ | 151/520 [09:30<22:31, 3.66s/it] {'loss': 1.3888, 'grad_norm': 0.0005128744962874684, 'learning_rate': 0.5832213022832013, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:30<22:31, 3.66s/it] 29%|██▉ | 152/520 [09:33<22:18, 3.64s/it] {'loss': 1.3536, 'grad_norm': 0.0006022799825330078, 'learning_rate': 0.5815900431589008, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:33<22:18, 3.64s/it] 29%|██▉ | 153/520 [09:37<22:10, 3.63s/it] {'loss': 1.3845, 'grad_norm': 0.00045732959423595447, 'learning_rate': 0.5799497858033532, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:37<22:10, 3.63s/it] 30%|██▉ | 154/520 [09:41<22:05, 3.62s/it] {'loss': 1.4813, 'grad_norm': 0.0004019979443658827, 'learning_rate': 0.5783005939473423, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:41<22:05, 3.62s/it] 30%|██▉ | 155/520 [09:44<22:01, 3.62s/it] {'loss': 1.3738, 'grad_norm': 0.0005949493951650931, 'learning_rate': 0.5766425316687946, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:44<22:01, 3.62s/it] 30%|███ | 156/520 [09:48<21:56, 
3.62s/it] {'loss': 1.4281, 'grad_norm': 0.0004650457856816352, 'learning_rate': 0.5749756633902887, 'epoch': 0.3} + 30%|███ | 156/520 [09:48<21:56, 3.62s/it] 30%|███ | 157/520 [09:52<21:57, 3.63s/it] {'loss': 1.6751, 'grad_norm': 0.000524268535541836, 'learning_rate': 0.573300053876553, 'epoch': 0.3} + 30%|███ | 157/520 [09:52<21:57, 3.63s/it] 30%|███ | 158/520 [09:55<21:55, 3.63s/it] {'loss': 1.3841, 'grad_norm': 0.0005928830537083442, 'learning_rate': 0.5716157682319489, 'epoch': 0.3} + 30%|███ | 158/520 [09:55<21:55, 3.63s/it] 31%|███ | 159/520 [09:59<21:51, 3.63s/it] {'loss': 1.4168, 'grad_norm': 0.0004892590822513328, 'learning_rate': 0.5699228718979416, 'epoch': 0.31} + 31%|███ | 159/520 [09:59<21:51, 3.63s/it] 31%|███ | 160/520 [10:02<21:52, 3.64s/it] {'loss': 1.4533, 'grad_norm': 0.0004677946107439843, 'learning_rate': 0.5682214306505567, 'epoch': 0.31} + 31%|███ | 160/520 [10:03<21:52, 3.64s/it] 31%|███ | 161/520 [10:06<21:46, 3.64s/it] {'loss': 1.4324, 'grad_norm': 0.0005256043768643006, 'learning_rate': 0.5665115105978258, 'epoch': 0.31} + 31%|███ | 161/520 [10:06<21:46, 3.64s/it] 31%|███ | 162/520 [10:10<21:46, 3.65s/it] {'loss': 1.5611, 'grad_norm': 0.00047015189432316373, 'learning_rate': 0.5647931781772165, 'epoch': 0.31} + 31%|███ | 162/520 [10:10<21:46, 3.65s/it] 31%|███▏ | 163/520 [10:13<21:41, 3.65s/it] {'loss': 1.3263, 'grad_norm': 0.0004989727032501889, 'learning_rate': 0.5630665001530522, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:13<21:41, 3.65s/it] 32%|███▏ | 164/520 [10:17<21:45, 3.67s/it] {'loss': 1.2711, 'grad_norm': 0.000410065085129149, 'learning_rate': 0.561331543613917, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:17<21:45, 3.67s/it] 32%|███▏ | 165/520 [10:21<21:43, 3.67s/it] {'loss': 1.4146, 'grad_norm': 0.0004984380195589685, 'learning_rate': 0.5595883759700501, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:21<21:43, 3.67s/it] 32%|███▏ | 166/520 [10:24<21:32, 3.65s/it] {'loss': 1.4004, 'grad_norm': 0.0004642192962916302, 'learning_rate': 0.5578370649507255, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:24<21:32, 3.65s/it] 32%|███▏ | 167/520 [10:28<21:30, 3.65s/it] {'loss': 1.3915, 'grad_norm': 0.000437927882417953, 'learning_rate': 0.5560776786016216, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:28<21:30, 3.65s/it] 32%|███▏ | 168/520 [10:32<21:32, 3.67s/it] {'loss': 1.3467, 'grad_norm': 0.0005121807284286779, 'learning_rate': 0.5543102852821764, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:32<21:32, 3.67s/it] 32%|███▎ | 169/520 [10:36<21:38, 3.70s/it] {'loss': 1.4124, 'grad_norm': 0.0004246274586839009, 'learning_rate': 0.552534953662932, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:36<21:38, 3.70s/it] 33%|███▎ | 170/520 [10:40<21:59, 3.77s/it] {'loss': 1.4986, 'grad_norm': 0.00047506095331234646, 'learning_rate': 0.550751752722866, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:40<21:59, 3.77s/it] 33%|███▎ | 171/520 [10:43<22:05, 3.80s/it] {'loss': 1.3515, 'grad_norm': 0.00042105061728479486, 'learning_rate': 0.5489607517467123, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:43<22:05, 3.80s/it] 33%|███▎ | 172/520 [10:47<22:15, 3.84s/it] {'loss': 1.4217, 'grad_norm': 0.000369544178655511, 'learning_rate': 0.5471620203222677, 'epoch': 0.33} + 33%|███▎ | 172/520 [10:47<22:15, 3.84s/it] 33%|███▎ | 173/520 [10:51<22:21, 3.87s/it] {'loss': 1.3548, 'grad_norm': 0.00037594689032368283, 'learning_rate': 0.5453556283376894, 'epoch': 0.33} + 33%|███▎ | 173/520 [10:51<22:21, 3.87s/it] 33%|███▎ | 174/520 [10:55<22:17, 3.86s/it] {'loss': 1.4272, 'grad_norm': 0.0005234870908242887, 'learning_rate': 0.5435416459787787, 
'epoch': 0.33} + 33%|███▎ | 174/520 [10:55<22:17, 3.86s/it] 34%|███▎ | 175/520 [10:59<22:16, 3.87s/it] {'loss': 1.3279, 'grad_norm': 0.0007086287338074692, 'learning_rate': 0.541720143726255, 'epoch': 0.34} + 34%|███▎ | 175/520 [10:59<22:16, 3.87s/it] 34%|███▍ | 176/520 [11:03<22:14, 3.88s/it] {'loss': 1.5914, 'grad_norm': 0.00045907805593949845, 'learning_rate': 0.5398911923530157, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:03<22:14, 3.88s/it] 34%|███▍ | 177/520 [11:07<22:12, 3.89s/it] {'loss': 1.4652, 'grad_norm': 0.00043331581667439595, 'learning_rate': 0.5380548629213884, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:07<22:12, 3.89s/it] 34%|███▍ | 178/520 [11:11<22:09, 3.89s/it] {'loss': 1.4062, 'grad_norm': 0.0004156351594539786, 'learning_rate': 0.5362112267803678, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:11<22:09, 3.89s/it] 34%|███▍ | 179/520 [11:15<22:04, 3.88s/it] {'loss': 1.4927, 'grad_norm': 0.000492325027196588, 'learning_rate': 0.5343603555628452, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:15<22:04, 3.88s/it] 35%|███▍ | 180/520 [11:18<22:05, 3.90s/it] {'loss': 1.3778, 'grad_norm': 0.0003645454515739654, 'learning_rate': 0.5325023211828243, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:18<22:05, 3.90s/it] 35%|███▍ | 181/520 [11:22<22:02, 3.90s/it] {'loss': 1.3823, 'grad_norm': 0.0003363324509945618, 'learning_rate': 0.5306371958326273, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:22<22:02, 3.90s/it] 35%|███▌ | 182/520 [11:26<22:05, 3.92s/it] {'loss': 1.3953, 'grad_norm': 0.0003739464290727949, 'learning_rate': 0.5287650519800899, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:26<22:05, 3.92s/it] 35%|███▌ | 183/520 [11:30<21:58, 3.91s/it] {'loss': 1.4206, 'grad_norm': 0.00041679237551872254, 'learning_rate': 0.5268859623657458, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:30<21:58, 3.91s/it] 35%|███▌ | 184/520 [11:34<21:52, 3.90s/it] {'loss': 1.3319, 'grad_norm': 0.00041673547508569203, 'learning_rate': 0.5249999999999999, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:34<21:52, 3.90s/it] 36%|███▌ | 185/520 [11:38<21:45, 3.90s/it] {'loss': 1.4983, 'grad_norm': 0.0003692884140200449, 'learning_rate': 0.5231072381602926, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:38<21:45, 3.90s/it] 36%|███▌ | 186/520 [11:42<21:36, 3.88s/it] {'loss': 1.363, 'grad_norm': 0.0004301776475517256, 'learning_rate': 0.5212077503882513, 'epoch': 0.36} + 36%|███▌ | 186/520 [11:42<21:36, 3.88s/it] 36%|███▌ | 187/520 [11:46<21:29, 3.87s/it] {'loss': 1.3571, 'grad_norm': 0.00044899248028487627, 'learning_rate': 0.5193016104868339, 'epoch': 0.36} + 36%|███▌ | 187/520 [11:46<21:29, 3.87s/it] 36%|███▌ | 188/520 [11:50<21:23, 3.87s/it] {'loss': 1.4492, 'grad_norm': 0.0004083239873588501, 'learning_rate': 0.5173888925174613, 'epoch': 0.36} + 36%|███▌ | 188/520 [11:50<21:23, 3.87s/it] 36%|███▋ | 189/520 [11:53<21:15, 3.85s/it] {'loss': 1.4572, 'grad_norm': 0.0006251849900702334, 'learning_rate': 0.5154696707971395, 'epoch': 0.36} + 36%|███▋ | 189/520 [11:53<21:15, 3.85s/it] 37%|███▋ | 190/520 [11:57<21:11, 3.85s/it] {'loss': 1.3659, 'grad_norm': 0.00046045204174072115, 'learning_rate': 0.5135440198955716, 'epoch': 0.37} + 37%|███▋ | 190/520 [11:57<21:11, 3.85s/it] 37%|███▋ | 191/520 [12:01<21:06, 3.85s/it] {'loss': 1.3183, 'grad_norm': 0.0003831320174981247, 'learning_rate': 0.5116120146322619, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:01<21:06, 3.85s/it] 37%|███▋ | 192/520 [12:05<21:02, 3.85s/it] {'loss': 1.4111, 'grad_norm': 0.00039384905948778594, 'learning_rate': 0.5096737300736071, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:05<21:02, 3.85s/it] 37%|███▋ | 
193/520 [12:09<20:57, 3.84s/it] {'loss': 1.5384, 'grad_norm': 0.0005403354566964257, 'learning_rate': 0.5077292415299809, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:09<20:57, 3.84s/it] 37%|███▋ | 194/520 [12:13<20:59, 3.86s/it] {'loss': 1.407, 'grad_norm': 0.0003986156031418663, 'learning_rate': 0.5057786245528072, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:13<20:59, 3.86s/it] 38%|███▊ | 195/520 [12:17<20:53, 3.86s/it] {'loss': 1.426, 'grad_norm': 0.0003591430688098979, 'learning_rate': 0.5038219549316257, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:17<20:53, 3.86s/it] 38%|███▊ | 196/520 [12:20<20:50, 3.86s/it] {'loss': 1.4065, 'grad_norm': 0.000592586753530145, 'learning_rate': 0.5018593086911453, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:20<20:50, 3.86s/it] 38%|███▊ | 197/520 [12:24<20:43, 3.85s/it] {'loss': 1.3604, 'grad_norm': 0.000569920916143525, 'learning_rate': 0.4998907620882919, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:24<20:43, 3.85s/it] 38%|███▊ | 198/520 [12:28<20:41, 3.86s/it] {'loss': 1.4354, 'grad_norm': 0.0004635347834670239, 'learning_rate': 0.4979163916092448, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:28<20:41, 3.86s/it] 38%|███▊ | 199/520 [12:32<20:42, 3.87s/it] {'loss': 1.3362, 'grad_norm': 0.00045759352034181484, 'learning_rate': 0.49593627396646484, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:32<20:42, 3.87s/it] 38%|███▊ | 200/520 [12:36<20:35, 3.86s/it] {'loss': 1.477, 'grad_norm': 0.0005823475540287287, 'learning_rate': 0.49395048609571407, 'epoch': 0.38} + 38%|███▊ | 200/520 [12:36<20:35, 3.86s/it] 39%|███▊ | 201/520 [12:40<20:33, 3.87s/it] {'loss': 1.4827, 'grad_norm': 0.00037538435435988614, 'learning_rate': 0.49195910515306623, 'epoch': 0.39} + 39%|███▊ | 201/520 [12:40<20:33, 3.87s/it] 39%|███▉ | 202/520 [12:44<20:26, 3.86s/it] {'loss': 1.3328, 'grad_norm': 0.0007121378663701683, 'learning_rate': 0.48996220851190925, 'epoch': 0.39} + 39%|███▉ | 202/520 [12:44<20:26, 3.86s/it] 39%|███▉ | 203/520 [12:47<20:23, 3.86s/it] {'loss': 1.3887, 'grad_norm': 0.0006176260421852671, 'learning_rate': 0.48795987375993877, 'epoch': 0.39} + 39%|███▉ | 203/520 [12:47<20:23, 3.86s/it] 39%|███▉ | 204/520 [12:51<20:18, 3.86s/it] {'loss': 1.4305, 'grad_norm': 0.0006000916081086537, 'learning_rate': 0.48595217869614316, 'epoch': 0.39} + 39%|███▉ | 204/520 [12:51<20:18, 3.86s/it] 39%|███▉ | 205/520 [12:55<20:14, 3.85s/it] {'loss': 1.4958, 'grad_norm': 0.00042877407779576705, 'learning_rate': 0.4839392013277814, 'epoch': 0.39} + 39%|███▉ | 205/520 [12:55<20:14, 3.85s/it] 40%|███▉ | 206/520 [12:59<20:13, 3.86s/it] {'loss': 1.4614, 'grad_norm': 0.0005029769525853474, 'learning_rate': 0.4819210198673518, 'epoch': 0.4} + 40%|███▉ | 206/520 [12:59<20:13, 3.86s/it] 40%|███▉ | 207/520 [13:03<20:09, 3.86s/it] {'loss': 1.4704, 'grad_norm': 0.00040645437718785034, 'learning_rate': 0.4798977127295532, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:03<20:09, 3.86s/it] 40%|████ | 208/520 [13:07<20:08, 3.87s/it] {'loss': 1.415, 'grad_norm': 0.00042028940237516684, 'learning_rate': 0.4778693585282383, 'epoch': 0.4} + 40%|████ | 208/520 [13:07<20:08, 3.87s/it] 40%|████ | 209/520 [13:11<20:06, 3.88s/it] {'loss': 1.3631, 'grad_norm': 0.000385645957964509, 'learning_rate': 0.4758360360733586, 'epoch': 0.4} + 40%|████ | 209/520 [13:11<20:06, 3.88s/it] 40%|████ | 210/520 [13:14<19:59, 3.87s/it] {'loss': 1.4122, 'grad_norm': 0.00037267556239902995, 'learning_rate': 0.47379782436790346, 'epoch': 0.4} + 40%|████ | 210/520 [13:14<19:59, 3.87s/it] 41%|████ | 211/520 [13:18<19:57, 3.88s/it] {'loss': 1.44, 'grad_norm': 
0.0003483179801092908, 'learning_rate': 0.47175480260482944, 'epoch': 0.41} + 41%|████ | 211/520 [13:18<19:57, 3.88s/it] 41%|████ | 212/520 [13:22<19:56, 3.89s/it] {'loss': 1.4283, 'grad_norm': 0.00041555788998555606, 'learning_rate': 0.46970705016398406, 'epoch': 0.41} + 41%|████ | 212/520 [13:22<19:56, 3.89s/it] 41%|████ | 213/520 [13:26<19:49, 3.87s/it] {'loss': 1.3679, 'grad_norm': 0.0005534878682860338, 'learning_rate': 0.4676546466090208, 'epoch': 0.41} + 41%|████ | 213/520 [13:26<19:49, 3.87s/it] 41%|████ | 214/520 [13:30<19:47, 3.88s/it] {'loss': 1.4, 'grad_norm': 0.0003901448598505907, 'learning_rate': 0.4655976716843085, 'epoch': 0.41} + 41%|████ | 214/520 [13:30<19:47, 3.88s/it] 41%|████▏ | 215/520 [13:34<19:41, 3.87s/it] {'loss': 1.3962, 'grad_norm': 0.000478156197001027, 'learning_rate': 0.4635362053118325, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:34<19:41, 3.87s/it] 42%|████▏ | 216/520 [13:38<19:39, 3.88s/it] {'loss': 1.272, 'grad_norm': 0.00045739458783415317, 'learning_rate': 0.46147032758808954, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:38<19:39, 3.88s/it] 42%|████▏ | 217/520 [13:42<19:44, 3.91s/it] {'loss': 1.4222, 'grad_norm': 0.00040600784665014805, 'learning_rate': 0.4594001187809756, 'epoch': 0.42} + 42%|████▏ | 217/520 [13:42<19:44, 3.91s/it] 42%|████▏ | 218/520 [13:46<19:37, 3.90s/it] {'loss': 1.4107, 'grad_norm': 0.0004091555856804972, 'learning_rate': 0.457325659326667, 'epoch': 0.42} + 42%|████▏ | 218/520 [13:46<19:37, 3.90s/it] 42%|████▏ | 219/520 [13:49<19:30, 3.89s/it] {'loss': 1.3925, 'grad_norm': 0.0004196906688103495, 'learning_rate': 0.4552470298264955, 'epoch': 0.42} + 42%|████▏ | 219/520 [13:49<19:30, 3.89s/it] 42%|████▏ | 220/520 [13:53<19:19, 3.86s/it] {'loss': 1.4584, 'grad_norm': 0.00043536958692209704, 'learning_rate': 0.45316431104381644, 'epoch': 0.42} + 42%|████▏ | 220/520 [13:53<19:19, 3.86s/it] 42%|████▎ | 221/520 [13:57<19:20, 3.88s/it] {'loss': 1.4194, 'grad_norm': 0.0003707887185256983, 'learning_rate': 0.45107758390087044, 'epoch': 0.42} + 42%|████▎ | 221/520 [13:57<19:20, 3.88s/it] 43%|████▎ | 222/520 [14:01<19:15, 3.88s/it] {'loss': 1.3234, 'grad_norm': 0.0003643147196070816, 'learning_rate': 0.44898692947563956, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:01<19:15, 3.88s/it] 43%|████▎ | 223/520 [14:05<19:15, 3.89s/it] {'loss': 1.3245, 'grad_norm': 0.0003643540628116663, 'learning_rate': 0.44689242899869724, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:05<19:15, 3.89s/it] 43%|████▎ | 224/520 [14:09<19:07, 3.88s/it] {'loss': 1.7018, 'grad_norm': 0.0005576865699041741, 'learning_rate': 0.44479416385005177, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:09<19:07, 3.88s/it] 43%|████▎ | 225/520 [14:13<19:08, 3.89s/it] {'loss': 1.3318, 'grad_norm': 0.00041198105490449134, 'learning_rate': 0.4426922155559845, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:13<19:08, 3.89s/it] 43%|████▎ | 226/520 [14:17<19:01, 3.88s/it] {'loss': 1.4406, 'grad_norm': 0.00042321950016219363, 'learning_rate': 0.44058666578588224, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:17<19:01, 3.88s/it] 44%|████▎ | 227/520 [14:21<19:04, 3.91s/it] {'loss': 1.426, 'grad_norm': 0.0003794395024585332, 'learning_rate': 0.4384775963490641, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:21<19:04, 3.91s/it] 44%|████▍ | 228/520 [14:24<18:53, 3.88s/it] {'loss': 1.6138, 'grad_norm': 0.0005124080502222217, 'learning_rate': 0.4363650891916027, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:24<18:53, 3.88s/it] 44%|████▍ | 229/520 [14:28<18:53, 3.90s/it] {'loss': 1.4234, 'grad_norm': 0.00043188514413666334, 'learning_rate': 
0.43424922639314056, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:28<18:53, 3.90s/it] 44%|████▍ | 230/520 [14:32<18:46, 3.88s/it] {'loss': 1.2738, 'grad_norm': 0.0003841599482036287, 'learning_rate': 0.43213009016370035, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:32<18:46, 3.88s/it] 44%|████▍ | 231/520 [14:36<18:44, 3.89s/it] {'loss': 1.3568, 'grad_norm': 0.0004993284962671778, 'learning_rate': 0.43000776284049136, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:36<18:44, 3.89s/it] 45%|████▍ | 232/520 [14:40<18:34, 3.87s/it] {'loss': 1.645, 'grad_norm': 0.0003912341167766037, 'learning_rate': 0.42788232688471, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:40<18:34, 3.87s/it] 45%|████▍ | 233/520 [14:44<18:13, 3.81s/it] {'loss': 1.5037, 'grad_norm': 0.0004946513764152035, 'learning_rate': 0.425753864878336, 'epoch': 0.45} + 45%|████▍ | 233/520 [14:44<18:13, 3.81s/it] 45%|████▌ | 234/520 [14:47<18:10, 3.81s/it] {'loss': 1.3197, 'grad_norm': 0.00040103481582824755, 'learning_rate': 0.42362245952092353, 'epoch': 0.45} + 45%|████▌ | 234/520 [14:47<18:10, 3.81s/it] 45%|████▌ | 235/520 [14:51<18:17, 3.85s/it] {'loss': 1.3535, 'grad_norm': 0.00036495445396793466, 'learning_rate': 0.4214881936263881, 'epoch': 0.45} + 45%|████▌ | 235/520 [14:51<18:17, 3.85s/it] 45%|████▌ | 236/520 [14:55<18:16, 3.86s/it] {'loss': 1.4709, 'grad_norm': 0.0005397956680923768, 'learning_rate': 0.41935115011978913, 'epoch': 0.45} + 45%|████▌ | 236/520 [14:55<18:16, 3.86s/it] 46%|████▌ | 237/520 [14:59<18:18, 3.88s/it] {'loss': 1.4204, 'grad_norm': 0.0004352941457881624, 'learning_rate': 0.41721141203410766, 'epoch': 0.46} + 46%|████▌ | 237/520 [14:59<18:18, 3.88s/it] 46%|████▌ | 238/520 [15:03<18:16, 3.89s/it] {'loss': 1.3821, 'grad_norm': 0.000504436492207963, 'learning_rate': 0.41506906250702014, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:03<18:16, 3.89s/it] 46%|████▌ | 239/520 [15:07<18:16, 3.90s/it] {'loss': 1.469, 'grad_norm': 0.0006930841097672487, 'learning_rate': 0.4129241847776684, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:07<18:16, 3.90s/it] 46%|████▌ | 240/520 [15:11<17:52, 3.83s/it] {'loss': 1.236, 'grad_norm': 0.0003266440689465717, 'learning_rate': 0.4107768621834257, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:11<17:52, 3.83s/it] 46%|████▋ | 241/520 [15:14<17:35, 3.78s/it] {'loss': 1.3102, 'grad_norm': 0.0005272894147802227, 'learning_rate': 0.4086271781566577, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:14<17:35, 3.78s/it] 47%|████▋ | 242/520 [15:18<17:24, 3.76s/it] {'loss': 1.3511, 'grad_norm': 0.0003301391975708256, 'learning_rate': 0.4064752162214823, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:18<17:24, 3.76s/it] 47%|████▋ | 243/520 [15:22<17:14, 3.73s/it] {'loss': 1.3404, 'grad_norm': 0.0006011848562304917, 'learning_rate': 0.40432105999052304, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:22<17:14, 3.73s/it] 47%|████▋ | 244/520 [15:25<17:04, 3.71s/it] {'loss': 1.4788, 'grad_norm': 0.0006276682726671073, 'learning_rate': 0.402164793161661, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:25<17:04, 3.71s/it] 47%|████▋ | 245/520 [15:29<17:01, 3.72s/it] {'loss': 1.3326, 'grad_norm': 0.0004364458688383395, 'learning_rate': 0.400006499514783, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:29<17:01, 3.72s/it] 47%|████▋ | 246/520 [15:33<16:54, 3.70s/it] {'loss': 1.6157, 'grad_norm': 0.0004401614548529732, 'learning_rate': 0.3978462629085257, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:33<16:54, 3.70s/it] 48%|████▊ | 247/520 [15:36<16:49, 3.70s/it] {'loss': 1.5113, 'grad_norm': 0.0006219305953162592, 'learning_rate': 0.3956841672770181, 'epoch': 0.47} + 
48%|████▊ | 247/520 [15:36<16:49, 3.70s/it] 48%|████▊ | 248/520 [15:40<16:43, 3.69s/it] {'loss': 1.3071, 'grad_norm': 0.000399218830779164, 'learning_rate': 0.39352029662661986, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:40<16:43, 3.69s/it] 48%|████▊ | 249/520 [15:44<16:40, 3.69s/it] {'loss': 1.4582, 'grad_norm': 0.000391045547460413, 'learning_rate': 0.3913547350326575, 'epoch': 0.48} + 48%|████▊ | 249/520 [15:44<16:40, 3.69s/it] 48%|████▊ | 250/520 [15:48<16:42, 3.71s/it] {'loss': 1.389, 'grad_norm': 0.0004644028767672348, 'learning_rate': 0.3891875666361577, 'epoch': 0.48} + 48%|████▊ | 250/520 [15:48<16:42, 3.71s/it] 48%|████▊ | 251/520 [15:51<16:38, 3.71s/it] {'loss': 1.4396, 'grad_norm': 0.0003860694758833717, 'learning_rate': 0.38701887564057824, 'epoch': 0.48} + 48%|████▊ | 251/520 [15:51<16:38, 3.71s/it] 48%|████▊ | 252/520 [15:55<16:30, 3.69s/it] {'loss': 1.4798, 'grad_norm': 0.0004610668523848268, 'learning_rate': 0.3848487463085358, 'epoch': 0.48} + 48%|████▊ | 252/520 [15:55<16:30, 3.69s/it] 49%|████▊ | 253/520 [15:59<16:36, 3.73s/it] {'loss': 1.4276, 'grad_norm': 0.0003983039406874994, 'learning_rate': 0.38267726295853266, 'epoch': 0.49} + 49%|████▊ | 253/520 [15:59<16:36, 3.73s/it] 49%|████▉ | 254/520 [16:02<16:30, 3.72s/it] {'loss': 1.3348, 'grad_norm': 0.0003781994841506003, 'learning_rate': 0.3805045099616804, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:03<16:30, 3.72s/it] 49%|████▉ | 255/520 [16:06<16:26, 3.72s/it] {'loss': 1.3707, 'grad_norm': 0.0003964689809782615, 'learning_rate': 0.37833057173842116, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:06<16:26, 3.72s/it] 49%|████▉ | 256/520 [16:10<16:25, 3.73s/it] {'loss': 1.4152, 'grad_norm': 0.00044486858801741136, 'learning_rate': 0.3761555327552485, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:10<16:25, 3.73s/it] 49%|████▉ | 257/520 [16:14<16:19, 3.73s/it] {'loss': 1.4179, 'grad_norm': 0.000563708968423416, 'learning_rate': 0.3739794775214248, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:14<16:19, 3.73s/it] 50%|████▉ | 258/520 [16:17<16:15, 3.72s/it] {'loss': 1.4252, 'grad_norm': 0.0003334791666088316, 'learning_rate': 0.37180249058569825, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:17<16:15, 3.72s/it] 50%|████▉ | 259/520 [16:21<16:10, 3.72s/it] {'loss': 1.493, 'grad_norm': 0.0005130813953514486, 'learning_rate': 0.3696246565330171, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:21<16:10, 3.72s/it] 50%|█████ | 260/520 [16:25<16:03, 3.71s/it] {'loss': 1.5968, 'grad_norm': 0.0003604785827970125, 'learning_rate': 0.36744605998124397, 'epoch': 0.5} + 50%|█████ | 260/520 [16:25<16:03, 3.71s/it] 50%|█████ | 261/520 [16:29<16:02, 3.72s/it] {'loss': 1.5123, 'grad_norm': 0.00041570933745595554, 'learning_rate': 0.3652667855778676, 'epoch': 0.5} + 50%|█████ | 261/520 [16:29<16:02, 3.72s/it] 50%|█████ | 262/520 [16:32<15:58, 3.71s/it] {'loss': 1.3509, 'grad_norm': 0.00033189302222579167, 'learning_rate': 0.36308691799671405, 'epoch': 0.5} + 50%|█████ | 262/520 [16:32<15:58, 3.71s/it] 51%|█████ | 263/520 [16:36<15:54, 3.71s/it] {'loss': 1.5307, 'grad_norm': 0.0003742494065954192, 'learning_rate': 0.3609065419346566, 'epoch': 0.51} + 51%|█████ | 263/520 [16:36<15:54, 3.71s/it] 51%|█████ | 264/520 [16:40<15:48, 3.70s/it] {'loss': 1.4472, 'grad_norm': 0.00035756330395452516, 'learning_rate': 0.35872574210832553, 'epoch': 0.51} + 51%|█████ | 264/520 [16:40<15:48, 3.70s/it] 51%|█████ | 265/520 [16:43<15:46, 3.71s/it] {'loss': 1.3579, 'grad_norm': 0.0005135481258453603, 'learning_rate': 0.3565446032508158, 'epoch': 0.51} + 51%|█████ | 265/520 [16:43<15:46, 
3.71s/it] 51%|█████ | 266/520 [16:47<15:44, 3.72s/it] {'loss': 1.2056, 'grad_norm': 0.0003379209052307014, 'learning_rate': 0.3543632101083953, 'epoch': 0.51} + 51%|█████ | 266/520 [16:47<15:44, 3.72s/it] 51%|█████▏ | 267/520 [16:51<15:43, 3.73s/it] {'loss': 1.3485, 'grad_norm': 0.0005185922592032608, 'learning_rate': 0.35218164743721175, 'epoch': 0.51} + 51%|█████▏ | 267/520 [16:51<15:43, 3.73s/it] 52%|█████▏ | 268/520 [16:55<15:35, 3.71s/it] {'loss': 1.6653, 'grad_norm': 0.0007761039510071155, 'learning_rate': 0.35, 'epoch': 0.52} + 52%|█████▏ | 268/520 [16:55<15:35, 3.71s/it] 52%|█████▏ | 269/520 [16:58<15:30, 3.71s/it] {'loss': 1.4395, 'grad_norm': 0.0003843144144230959, 'learning_rate': 0.3478183525627882, 'epoch': 0.52} + 52%|█████▏ | 269/520 [16:58<15:30, 3.71s/it] 52%|█████▏ | 270/520 [17:02<15:25, 3.70s/it] {'loss': 1.4368, 'grad_norm': 0.0003274232540073517, 'learning_rate': 0.34563678989160473, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:02<15:25, 3.70s/it] 52%|█████▏ | 271/520 [17:06<15:20, 3.70s/it] {'loss': 1.4471, 'grad_norm': 0.0005149405519333684, 'learning_rate': 0.3434553967491843, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:06<15:20, 3.70s/it] 52%|█████▏ | 272/520 [17:09<15:18, 3.70s/it] {'loss': 1.4581, 'grad_norm': 0.00045824485187017185, 'learning_rate': 0.34127425789167454, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:09<15:18, 3.70s/it] 52%|█████▎ | 273/520 [17:13<15:13, 3.70s/it] {'loss': 1.5758, 'grad_norm': 0.0004507562158926561, 'learning_rate': 0.33909345806534347, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:13<15:13, 3.70s/it] 53%|█████▎ | 274/520 [17:17<15:09, 3.70s/it] {'loss': 1.3964, 'grad_norm': 0.00045278821978501314, 'learning_rate': 0.33691308200328607, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:17<15:09, 3.70s/it] 53%|█████▎ | 275/520 [17:20<15:04, 3.69s/it] {'loss': 1.3375, 'grad_norm': 0.0003779922368285756, 'learning_rate': 0.33473321442213244, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:20<15:04, 3.69s/it] 53%|█████▎ | 276/520 [17:24<14:58, 3.68s/it] {'loss': 1.4342, 'grad_norm': 0.00037751899001241886, 'learning_rate': 0.33255394001875593, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:24<14:58, 3.68s/it] 53%|█████▎ | 277/520 [17:28<14:57, 3.70s/it] {'loss': 1.5744, 'grad_norm': 0.00036680534078172917, 'learning_rate': 0.33037534346698283, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:28<14:57, 3.70s/it] 53%|█████▎ | 278/520 [17:31<14:58, 3.71s/it] {'loss': 1.3075, 'grad_norm': 0.0003453799353866909, 'learning_rate': 0.32819750941430176, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:31<14:58, 3.71s/it] 54%|█████▎ | 279/520 [17:35<14:51, 3.70s/it] {'loss': 1.4898, 'grad_norm': 0.00037980854404922026, 'learning_rate': 0.32602052247857516, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:35<14:51, 3.70s/it] 54%|█████▍ | 280/520 [17:39<14:49, 3.71s/it] {'loss': 1.3414, 'grad_norm': 0.0003674300160350654, 'learning_rate': 0.3238444672447515, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:39<14:49, 3.71s/it] 54%|█████▍ | 281/520 [17:43<14:43, 3.70s/it] {'loss': 1.4655, 'grad_norm': 0.0004321585467356303, 'learning_rate': 0.3216694282615788, 'epoch': 0.54} + 54%|█████▍ | 281/520 [17:43<14:43, 3.70s/it] 54%|█████▍ | 282/520 [17:46<14:38, 3.69s/it] {'loss': 1.3243, 'grad_norm': 0.0005519706562004264, 'learning_rate': 0.3194954900383196, 'epoch': 0.54} + 54%|█████▍ | 282/520 [17:46<14:38, 3.69s/it] 54%|█████▍ | 283/520 [17:50<14:33, 3.69s/it] {'loss': 1.4996, 'grad_norm': 0.00036522210598867113, 'learning_rate': 0.3173227370414673, 'epoch': 0.54} + 54%|█████▍ | 283/520 [17:50<14:33, 
3.69s/it] 55%|█████▍ | 284/520 [17:54<14:28, 3.68s/it] {'loss': 1.4389, 'grad_norm': 0.0005978282386500071, 'learning_rate': 0.3151512536914642, 'epoch': 0.55} + 55%|█████▍ | 284/520 [17:54<14:28, 3.68s/it] 55%|█████▍ | 285/520 [17:57<14:26, 3.69s/it] {'loss': 1.3478, 'grad_norm': 0.00037423106437950313, 'learning_rate': 0.31298112435942177, 'epoch': 0.55} + 55%|█████▍ | 285/520 [17:57<14:26, 3.69s/it] 55%|█████▌ | 286/520 [18:01<14:21, 3.68s/it] {'loss': 1.2058, 'grad_norm': 0.0004211115014260953, 'learning_rate': 0.31081243336384223, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:01<14:21, 3.68s/it] 55%|█████▌ | 287/520 [18:05<14:15, 3.67s/it] {'loss': 1.4748, 'grad_norm': 0.0003686066057968398, 'learning_rate': 0.3086452649673425, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:05<14:15, 3.67s/it] 55%|█████▌ | 288/520 [18:08<14:13, 3.68s/it] {'loss': 1.5157, 'grad_norm': 0.0003203267083591794, 'learning_rate': 0.30647970337338026, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:08<14:13, 3.68s/it] 56%|█████▌ | 289/520 [18:12<14:09, 3.68s/it] {'loss': 1.3601, 'grad_norm': 0.00041075324359441973, 'learning_rate': 0.304315832722982, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:12<14:09, 3.68s/it] 56%|█████▌ | 290/520 [18:16<14:06, 3.68s/it] {'loss': 1.2935, 'grad_norm': 0.0004494609493810803, 'learning_rate': 0.30215373709147436, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:16<14:06, 3.68s/it] 56%|█████▌ | 291/520 [18:19<14:07, 3.70s/it] {'loss': 1.3289, 'grad_norm': 0.0005922743473112737, 'learning_rate': 0.29999350048521706, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:19<14:07, 3.70s/it] 56%|█████▌ | 292/520 [18:23<14:01, 3.69s/it] {'loss': 1.3922, 'grad_norm': 0.00033398038927556805, 'learning_rate': 0.29783520683833886, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:23<14:01, 3.69s/it] 56%|█████▋ | 293/520 [18:27<13:56, 3.69s/it] {'loss': 1.3269, 'grad_norm': 0.00034708935976781853, 'learning_rate': 0.2956789400094769, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:27<13:56, 3.69s/it] 57%|█████▋ | 294/520 [18:30<13:53, 3.69s/it] {'loss': 1.3503, 'grad_norm': 0.00037734538972616243, 'learning_rate': 0.29352478377851765, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:30<13:53, 3.69s/it] 57%|█████▋ | 295/520 [18:34<13:48, 3.68s/it] {'loss': 1.5497, 'grad_norm': 0.0003716600206403233, 'learning_rate': 0.2913728218433423, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:34<13:48, 3.68s/it] 57%|█████▋ | 296/520 [18:38<13:48, 3.70s/it] {'loss': 1.3039, 'grad_norm': 0.00037277715551261285, 'learning_rate': 0.2892231378165744, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:38<13:48, 3.70s/it] 57%|█████▋ | 297/520 [18:42<13:43, 3.69s/it] {'loss': 1.4484, 'grad_norm': 0.000398056303381986, 'learning_rate': 0.28707581522233155, 'epoch': 0.57} + 57%|█████▋ | 297/520 [18:42<13:43, 3.69s/it] 57%|█████▋ | 298/520 [18:45<13:37, 3.68s/it] {'loss': 1.413, 'grad_norm': 0.0005985327087252647, 'learning_rate': 0.2849309374929799, 'epoch': 0.57} + 57%|█████▋ | 298/520 [18:45<13:37, 3.68s/it] 57%|█████▊ | 299/520 [18:49<13:32, 3.68s/it] {'loss': 1.5436, 'grad_norm': 0.0005134971094219621, 'learning_rate': 0.28278858796589235, 'epoch': 0.57} + 57%|█████▊ | 299/520 [18:49<13:32, 3.68s/it] 58%|█████▊ | 300/520 [18:52<13:26, 3.67s/it] {'loss': 1.462, 'grad_norm': 0.0005559768775405236, 'learning_rate': 0.2806488498802109, 'epoch': 0.58} + 58%|█████▊ | 300/520 [18:52<13:26, 3.67s/it] 58%|█████▊ | 301/520 [18:56<13:22, 3.66s/it] {'loss': 1.4578, 'grad_norm': 0.0004074112564807541, 'learning_rate': 0.2785118063736119, 'epoch': 0.58} + 58%|█████▊ | 301/520 
[18:56<13:22, 3.66s/it] 58%|█████▊ | 302/520 [19:00<13:19, 3.67s/it] {'loss': 1.5596, 'grad_norm': 0.000415003243177834, 'learning_rate': 0.27637754047907653, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:00<13:19, 3.67s/it] 58%|█████▊ | 303/520 [19:04<13:16, 3.67s/it] {'loss': 1.3644, 'grad_norm': 0.0007873008426911333, 'learning_rate': 0.27424613512166396, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:04<13:16, 3.67s/it] 58%|█████▊ | 304/520 [19:07<13:23, 3.72s/it] {'loss': 1.4516, 'grad_norm': 0.0004371217409847069, 'learning_rate': 0.27211767311528995, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:07<13:23, 3.72s/it] 59%|█████▊ | 305/520 [19:11<13:26, 3.75s/it] {'loss': 1.515, 'grad_norm': 0.0006293444051805462, 'learning_rate': 0.2699922371595087, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:11<13:26, 3.75s/it] 59%|█████▉ | 306/520 [19:15<13:28, 3.78s/it] {'loss': 1.4146, 'grad_norm': 0.0004598487651704392, 'learning_rate': 0.2678699098362997, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:15<13:28, 3.78s/it] 59%|█████▉ | 307/520 [19:19<13:32, 3.81s/it] {'loss': 1.3673, 'grad_norm': 0.00031154167362720385, 'learning_rate': 0.2657507736068595, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:19<13:32, 3.81s/it] 59%|█████▉ | 308/520 [19:23<13:30, 3.82s/it] {'loss': 1.4742, 'grad_norm': 0.0003884803412387338, 'learning_rate': 0.2636349108083972, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:23<13:30, 3.82s/it] 59%|█████▉ | 309/520 [19:27<14:09, 4.03s/it] {'loss': 1.3476, 'grad_norm': 0.0007782825632264219, 'learning_rate': 0.2615224036509358, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:27<14:09, 4.03s/it] 60%|█████▉ | 310/520 [19:31<13:52, 3.97s/it] {'loss': 1.323, 'grad_norm': 0.0005555513351127592, 'learning_rate': 0.25941333421411766, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:31<13:52, 3.97s/it] 60%|█████▉ | 311/520 [19:35<13:40, 3.92s/it] {'loss': 1.3075, 'grad_norm': 0.000677415185229976, 'learning_rate': 0.2573077844440154, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:35<13:40, 3.92s/it] 60%|██████ | 312/520 [19:39<13:57, 4.03s/it] {'loss': 1.2865, 'grad_norm': 0.000519772304773659, 'learning_rate': 0.25520583614994824, 'epoch': 0.6} + 60%|██████ | 312/520 [19:39<13:57, 4.03s/it] 60%|██████ | 313/520 [19:43<13:40, 3.97s/it] {'loss': 1.276, 'grad_norm': 0.00034663137037108403, 'learning_rate': 0.2531075710013027, 'epoch': 0.6} + 60%|██████ | 313/520 [19:43<13:40, 3.97s/it] 60%|██████ | 314/520 [19:47<13:49, 4.03s/it] {'loss': 1.3166, 'grad_norm': 0.00035816123474176, 'learning_rate': 0.2510130705243604, 'epoch': 0.6} + 60%|██████ | 314/520 [19:47<13:49, 4.03s/it] 61%|██████ | 315/520 [19:51<13:32, 3.96s/it] {'loss': 1.5885, 'grad_norm': 0.0005227889385943499, 'learning_rate': 0.24892241609912957, 'epoch': 0.61} + 61%|██████ | 315/520 [19:51<13:32, 3.96s/it] 61%|██████ | 316/520 [19:55<13:37, 4.01s/it] {'loss': 1.2948, 'grad_norm': 0.0003611163403254677, 'learning_rate': 0.2468356889561835, 'epoch': 0.61} + 61%|██████ | 316/520 [19:55<13:37, 4.01s/it] 61%|██████ | 317/520 [19:59<13:22, 3.96s/it] {'loss': 1.2995, 'grad_norm': 0.0003750946471115701, 'learning_rate': 0.24475297017350442, 'epoch': 0.61} + 61%|██████ | 317/520 [19:59<13:22, 3.96s/it] 61%|██████ | 318/520 [20:03<13:11, 3.92s/it] {'loss': 1.441, 'grad_norm': 0.00037871392195661585, 'learning_rate': 0.24267434067333302, 'epoch': 0.61} + 61%|██████ | 318/520 [20:03<13:11, 3.92s/it] 61%|██████▏ | 319/520 [20:07<13:19, 3.98s/it] {'loss': 1.2975, 'grad_norm': 0.0006260666087033361, 'learning_rate': 0.24059988121902445, 'epoch': 0.61} + 61%|██████▏ | 319/520 
[20:07<13:19, 3.98s/it] 62%|██████▏ | 320/520 [20:11<13:06, 3.93s/it] {'loss': 1.2377, 'grad_norm': 0.0003372504721917697, 'learning_rate': 0.23852967241191048, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:11<13:06, 3.93s/it] 62%|██████▏ | 321/520 [20:15<12:55, 3.90s/it] {'loss': 1.4707, 'grad_norm': 0.0005173953342262461, 'learning_rate': 0.23646379468816756, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:15<12:55, 3.90s/it] 62%|██████▏ | 322/520 [20:18<12:46, 3.87s/it] {'loss': 1.3816, 'grad_norm': 0.0003936904364817923, 'learning_rate': 0.2344023283156916, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:18<12:46, 3.87s/it] 62%|██████▏ | 323/520 [20:22<12:33, 3.82s/it] {'loss': 1.4802, 'grad_norm': 0.0004311664772844142, 'learning_rate': 0.23234535339097928, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:22<12:33, 3.82s/it] 62%|██████▏ | 324/520 [20:26<12:20, 3.78s/it] {'loss': 1.4081, 'grad_norm': 0.0003312029626123314, 'learning_rate': 0.23029294983601595, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:26<12:20, 3.78s/it] 62%|██████▎ | 325/520 [20:29<12:13, 3.76s/it] {'loss': 1.3957, 'grad_norm': 0.00035760966839966656, 'learning_rate': 0.22824519739517043, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:29<12:13, 3.76s/it] 63%|██████▎ | 326/520 [20:33<12:03, 3.73s/it] {'loss': 1.3804, 'grad_norm': 0.0003226140124430067, 'learning_rate': 0.2262021756320965, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:33<12:03, 3.73s/it] 63%|██████▎ | 327/520 [20:37<11:57, 3.72s/it] {'loss': 1.5616, 'grad_norm': 0.0005218590425908595, 'learning_rate': 0.22416396392664134, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:37<11:57, 3.72s/it] 63%|██████▎ | 328/520 [20:40<11:46, 3.68s/it] {'loss': 1.464, 'grad_norm': 0.0006592724287184849, 'learning_rate': 0.22213064147176173, 'epoch': 0.63} + 63%|██████▎ | 328/520 [20:40<11:46, 3.68s/it] 63%|██████▎ | 329/520 [20:44<11:42, 3.68s/it] {'loss': 1.2945, 'grad_norm': 0.00035616270195134566, 'learning_rate': 0.2201022872704467, 'epoch': 0.63} + 63%|██████▎ | 329/520 [20:44<11:42, 3.68s/it] 63%|██████▎ | 330/520 [20:48<11:36, 3.66s/it] {'loss': 1.3865, 'grad_norm': 0.00033245672956345594, 'learning_rate': 0.21807898013264815, 'epoch': 0.63} + 63%|██████▎ | 330/520 [20:48<11:36, 3.66s/it] 64%|██████▎ | 331/520 [20:51<11:31, 3.66s/it] {'loss': 1.3258, 'grad_norm': 0.00034297267160739647, 'learning_rate': 0.21606079867221858, 'epoch': 0.64} + 64%|██████▎ | 331/520 [20:51<11:31, 3.66s/it] 64%|██████▍ | 332/520 [20:55<11:24, 3.64s/it] {'loss': 1.5568, 'grad_norm': 0.00042342069583843937, 'learning_rate': 0.21404782130385686, 'epoch': 0.64} + 64%|██████▍ | 332/520 [20:55<11:24, 3.64s/it] 64%|██████▍ | 333/520 [20:59<11:21, 3.64s/it] {'loss': 1.5235, 'grad_norm': 0.0004436772600253401, 'learning_rate': 0.21204012624006124, 'epoch': 0.64} + 64%|██████▍ | 333/520 [20:59<11:21, 3.64s/it] 64%|██████▍ | 334/520 [21:02<11:15, 3.63s/it] {'loss': 1.3872, 'grad_norm': 0.0004099946652384488, 'learning_rate': 0.2100377914880907, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:02<11:15, 3.63s/it] 64%|██████▍ | 335/520 [21:06<11:11, 3.63s/it] {'loss': 1.3971, 'grad_norm': 0.000670019121631239, 'learning_rate': 0.20804089484693378, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:06<11:11, 3.63s/it] 65%|██████▍ | 336/520 [21:09<11:12, 3.65s/it] {'loss': 1.2789, 'grad_norm': 0.0005277507325085634, 'learning_rate': 0.20604951390428602, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:09<11:12, 3.65s/it] 65%|██████▍ | 337/520 [21:13<11:08, 3.65s/it] {'loss': 1.2739, 'grad_norm': 0.00042474620914939766, 'learning_rate': 
0.2040637260335353, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:13<11:08, 3.65s/it] 65%|██████▌ | 338/520 [21:17<11:06, 3.66s/it] {'loss': 1.4151, 'grad_norm': 0.0004204100108172578, 'learning_rate': 0.20208360839075523, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:17<11:06, 3.66s/it] 65%|██████▌ | 339/520 [21:20<11:01, 3.66s/it] {'loss': 1.3495, 'grad_norm': 0.00039235339453749915, 'learning_rate': 0.20010923791170798, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:20<11:01, 3.66s/it] 65%|██████▌ | 340/520 [21:24<10:59, 3.66s/it] {'loss': 1.3242, 'grad_norm': 0.0005739936532490101, 'learning_rate': 0.19814069130885467, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:24<10:59, 3.66s/it] 66%|██████▌ | 341/520 [21:28<10:55, 3.66s/it] {'loss': 1.3541, 'grad_norm': 0.0003632319405714974, 'learning_rate': 0.19617804506837422, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:28<10:55, 3.66s/it] 66%|██████▌ | 342/520 [21:31<10:51, 3.66s/it] {'loss': 1.5253, 'grad_norm': 0.00041099375248402595, 'learning_rate': 0.19422137544719265, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:31<10:51, 3.66s/it] 66%|██████▌ | 343/520 [21:35<10:50, 3.68s/it] {'loss': 1.509, 'grad_norm': 0.0003761559783540514, 'learning_rate': 0.1922707584700191, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:35<10:50, 3.68s/it] 66%|██████▌ | 344/520 [21:39<10:47, 3.68s/it] {'loss': 1.3016, 'grad_norm': 0.0005324104865263199, 'learning_rate': 0.19032626992639293, 'epoch': 0.66} + 66%|██████▌ | 344/520 [21:39<10:47, 3.68s/it] 66%|██████▋ | 345/520 [21:43<10:42, 3.67s/it] {'loss': 1.4265, 'grad_norm': 0.000499227689227074, 'learning_rate': 0.1883879853677382, 'epoch': 0.66} + 66%|██████▋ | 345/520 [21:43<10:42, 3.67s/it] 67%|██████▋ | 346/520 [21:46<10:36, 3.66s/it] {'loss': 1.4827, 'grad_norm': 0.0003519101778632128, 'learning_rate': 0.18645598010442826, 'epoch': 0.67} + 67%|██████▋ | 346/520 [21:46<10:36, 3.66s/it] 67%|██████▋ | 347/520 [21:50<10:34, 3.67s/it] {'loss': 1.3197, 'grad_norm': 0.00036326439891828466, 'learning_rate': 0.18453032920286058, 'epoch': 0.67} + 67%|██████▋ | 347/520 [21:50<10:34, 3.67s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). 
Running this sequence through the model will result in indexing errors
[steps 348-429/520, elapsed ~21:53 -> ~26:52 at ~3.6-3.9 s/it; duplicated tqdm progress-bar redraws condensed. Per-step losses range ~1.19-1.68 and settle around ~1.3-1.4, grad_norm stays between ~3e-4 and ~9e-4, learning rate continues the cosine decay from 0.1826 to 0.0548, epoch 0.67 -> 0.82.]
Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048).
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [26:56<05:30, 3.67s/it] {'loss': 1.3397, 'grad_norm': 0.00036006851125768035, 'learning_rate': 0.05364653027010055, 'epoch': 0.83} + 83%|████████▎ | 430/520 [26:56<05:30, 3.67s/it] 83%|████████▎ | 431/520 [27:00<05:27, 3.68s/it] {'loss': 1.4846, 'grad_norm': 0.0005839070622391837, 'learning_rate': 0.052491581133001806, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:00<05:27, 3.68s/it] 83%|████████▎ | 432/520 [27:03<05:24, 3.69s/it] {'loss': 1.2616, 'grad_norm': 0.0004646146282232428, 'learning_rate': 0.051348191428745533, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:03<05:24, 3.69s/it] 83%|████████▎ | 433/520 [27:07<05:19, 3.68s/it] {'loss': 1.4032, 'grad_norm': 0.0005658319610145462, 'learning_rate': 0.05021640558275203, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:07<05:19, 3.68s/it] 83%|████████▎ | 434/520 [27:11<05:16, 3.68s/it] {'loss': 1.1352, 'grad_norm': 0.0003584436359036624, 'learning_rate': 0.04909626756958339, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:11<05:16, 3.68s/it] 84%|████████▎ | 435/520 [27:14<05:12, 3.67s/it] {'loss': 1.4598, 'grad_norm': 0.0004621908608829411, 'learning_rate': 0.047987820911235435, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:14<05:12, 3.67s/it] 84%|████████▍ | 436/520 [27:18<05:07, 3.67s/it] {'loss': 1.2303, 'grad_norm': 0.0005268754610997922, 'learning_rate': 0.04689110867544645, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:18<05:07, 3.67s/it] 84%|████████▍ | 437/520 [27:22<05:05, 3.68s/it] {'loss': 1.4793, 'grad_norm': 0.00034873677763116946, 'learning_rate': 0.045806173474023756, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:22<05:05, 3.68s/it] 84%|████████▍ | 438/520 [27:25<05:02, 3.69s/it] {'loss': 1.2607, 'grad_norm': 0.0003897113812571173, 'learning_rate': 0.044733057461188136, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:25<05:02, 3.69s/it] 84%|████████▍ | 439/520 [27:29<05:03, 3.75s/it] {'loss': 1.4249, 'grad_norm': 0.00033297298527893397, 'learning_rate': 0.04367180233193621, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:29<05:03, 3.75s/it] 85%|████████▍ | 440/520 [27:33<05:01, 3.77s/it] {'loss': 1.3241, 'grad_norm': 0.0006431416430415013, 'learning_rate': 0.04262244932041997, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:33<05:01, 3.77s/it] 85%|████████▍ | 441/520 [27:37<04:59, 3.80s/it] {'loss': 1.4704, 'grad_norm': 0.00034366661314165194, 'learning_rate': 0.04158503919834516, 'epoch': 0.85} + 85%|████████▍ | 441/520 [27:37<04:59, 3.80s/it] 85%|████████▌ | 442/520 [27:41<04:57, 3.82s/it] {'loss': 1.3583, 'grad_norm': 0.0005871664482210914, 'learning_rate': 0.040559612273386614, 'epoch': 0.85} + 85%|████████▌ | 442/520 [27:41<04:57, 3.82s/it] 85%|████████▌ | 443/520 [27:45<04:54, 3.82s/it] {'loss': 1.393, 'grad_norm': 0.0003721289210951848, 'learning_rate': 0.0395462083876224, 'epoch': 0.85} + 85%|████████▌ | 443/520 [27:45<04:54, 3.82s/it] 85%|████████▌ | 444/520 [27:48<04:51, 3.83s/it] {'loss': 1.3559, 'grad_norm': 0.0005842521549762365, 'learning_rate': 0.038544866915986006, 'epoch': 0.85} + 85%|████████▌ | 444/520 [27:48<04:51, 3.83s/it] 86%|████████▌ | 445/520 [27:52<04:47, 3.84s/it] {'loss': 1.2675, 'grad_norm': 0.0005049657547540357, 'learning_rate': 0.03755562676473603, 'epoch': 0.86} + 86%|████████▌ | 445/520 [27:52<04:47, 3.84s/it] 86%|████████▌ | 446/520 [27:56<04:44, 3.84s/it] {'loss': 1.5351, 'grad_norm': 0.0003721102533484663, 'learning_rate': 0.03657852636994467, 'epoch': 0.86} + 86%|████████▌ | 446/520 [27:56<04:44, 3.84s/it] 
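The learning_rate column here is the tail of a cosine decay that reaches zero at step 520. As a sanity check, the logged values are reproduced exactly by the standard Hugging Face cosine schedule with linear warmup, assuming a peak learning rate of 7e-1 (inferred from this run's name in the completion marker further down) and the warmup_ratio 0.03 visible in the neighboring command lines, i.e. ceil(0.03 * 520) = 16 warmup steps. This is a sketch of that schedule, not the project's code:

```python
import math

PEAK_LR = 0.7        # 7e-1, inferred from the experiment name in this log
TOTAL_STEPS = 520    # from the progress bars
WARMUP_STEPS = 16    # ceil(0.03 * 520), assuming warmup_ratio 0.03

def cosine_lr(step: int) -> float:
    """HF-style get_cosine_schedule_with_warmup (num_cycles = 0.5)."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(446))  # ~0.03657853, matching the step-446 record above
print(cosine_lr(519))  # ~6.79947e-06, matching the step-519 record near the end
```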
86%|████████▌ | 447/520 [28:00<04:40, 3.85s/it] {'loss': 1.3604, 'grad_norm': 0.00036407199374821966, 'learning_rate': 0.03561360369600458, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:00<04:40, 3.85s/it] 86%|████████▌ | 448/520 [28:04<04:37, 3.85s/it] {'loss': 1.3417, 'grad_norm': 0.0006301955453913337, 'learning_rate': 0.03466089623415333, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:04<04:37, 3.85s/it] 86%|████████▋ | 449/520 [28:08<04:33, 3.85s/it] {'loss': 1.5105, 'grad_norm': 0.0004915310363941975, 'learning_rate': 0.03372044100101723, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:08<04:33, 3.85s/it] 87%|████████▋ | 450/520 [28:12<04:30, 3.87s/it] {'loss': 1.4002, 'grad_norm': 0.00036159815176025863, 'learning_rate': 0.03279227453717252, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:12<04:30, 3.87s/it] 87%|████████▋ | 451/520 [28:15<04:26, 3.86s/it] {'loss': 1.3971, 'grad_norm': 0.000465711872086561, 'learning_rate': 0.03187643290572617, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:15<04:26, 3.86s/it] 87%|████████▋ | 452/520 [28:19<04:21, 3.85s/it] {'loss': 1.547, 'grad_norm': 0.00033240325087123855, 'learning_rate': 0.0309729516909144, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:19<04:21, 3.85s/it] 87%|████████▋ | 453/520 [28:23<04:17, 3.84s/it] {'loss': 1.538, 'grad_norm': 0.0006761528431296984, 'learning_rate': 0.030081865996719945, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:23<04:17, 3.84s/it] 87%|████████▋ | 454/520 [28:27<04:13, 3.84s/it] {'loss': 1.292, 'grad_norm': 0.0003359794141552523, 'learning_rate': 0.02920321044550833, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:27<04:13, 3.84s/it] 88%|████████▊ | 455/520 [28:31<04:09, 3.84s/it] {'loss': 1.443, 'grad_norm': 0.0008486075429842043, 'learning_rate': 0.028337019176682767, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:31<04:09, 3.84s/it] 88%|████████▊ | 456/520 [28:35<04:05, 3.84s/it] {'loss': 1.3464, 'grad_norm': 0.0004360370909072131, 'learning_rate': 0.02748332584535729, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:35<04:05, 3.84s/it] 88%|████████▊ | 457/520 [28:39<04:02, 3.85s/it] {'loss': 1.5278, 'grad_norm': 0.00048591261388528936, 'learning_rate': 0.02664216362104964, 'epoch': 0.88} + 88%|████████▊ | 457/520 [28:39<04:02, 3.85s/it] 88%|████████▊ | 458/520 [28:42<03:58, 3.84s/it] {'loss': 1.5036, 'grad_norm': 0.0004078484365882811, 'learning_rate': 0.02581356518639197, 'epoch': 0.88} + 88%|████████▊ | 458/520 [28:42<03:58, 3.84s/it] 88%|████████▊ | 459/520 [28:46<03:54, 3.85s/it] {'loss': 1.4097, 'grad_norm': 0.00033941152234170165, 'learning_rate': 0.024997562735861255, 'epoch': 0.88} + 88%|████████▊ | 459/520 [28:46<03:54, 3.85s/it] 88%|████████▊ | 460/520 [28:50<03:50, 3.85s/it] {'loss': 1.2838, 'grad_norm': 0.0004621545492263201, 'learning_rate': 0.02419418797452855, 'epoch': 0.88} + 88%|████████▊ | 460/520 [28:50<03:50, 3.85s/it] 89%|████████▊ | 461/520 [28:54<03:47, 3.86s/it] {'loss': 1.6116, 'grad_norm': 0.00031509365941954085, 'learning_rate': 0.023403472116826723, 'epoch': 0.89} + 89%|████████▊ | 461/520 [28:54<03:47, 3.86s/it] 89%|████████▉ | 462/520 [28:58<03:42, 3.84s/it] {'loss': 1.595, 'grad_norm': 0.000632833149582814, 'learning_rate': 0.022625445885338102, 'epoch': 0.89} + 89%|████████▉ | 462/520 [28:58<03:42, 3.84s/it] 89%|████████▉ | 463/520 [29:02<03:39, 3.86s/it] {'loss': 1.244, 'grad_norm': 0.00037214785287316546, 'learning_rate': 0.021860139509600317, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:02<03:39, 3.86s/it] 89%|████████▉ | 464/520 [29:05<03:35, 3.86s/it] {'loss': 1.4172, 'grad_norm': 
0.0003890900099654929, 'learning_rate': 0.021107582724932086, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:06<03:35, 3.86s/it] 89%|████████▉ | 465/520 [29:09<03:33, 3.87s/it] {'loss': 1.5144, 'grad_norm': 0.0004090501991484812, 'learning_rate': 0.020367804771277784, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:09<03:33, 3.87s/it] 90%|████████▉ | 466/520 [29:13<03:28, 3.86s/it] {'loss': 1.4054, 'grad_norm': 0.0004007960324703359, 'learning_rate': 0.019640834392071347, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:13<03:28, 3.86s/it] 90%|████████▉ | 467/520 [29:17<03:24, 3.86s/it] {'loss': 1.472, 'grad_norm': 0.0004216913937483934, 'learning_rate': 0.01892669983311939, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:17<03:24, 3.86s/it] 90%|█████████ | 468/520 [29:21<03:20, 3.86s/it] {'loss': 1.3927, 'grad_norm': 0.0005611885251450522, 'learning_rate': 0.018225428841503903, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:21<03:20, 3.86s/it] 90%|█████████ | 469/520 [29:25<03:16, 3.85s/it] {'loss': 1.4432, 'grad_norm': 0.0004301843295573918, 'learning_rate': 0.0175370486645039, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:25<03:16, 3.85s/it] 90%|█████████ | 470/520 [29:29<03:13, 3.86s/it] {'loss': 1.2874, 'grad_norm': 0.0003030920803985678, 'learning_rate': 0.016861586048537172, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:29<03:13, 3.86s/it] 91%|█████████ | 471/520 [29:33<03:09, 3.86s/it] {'loss': 1.3416, 'grad_norm': 0.00045965595168954114, 'learning_rate': 0.01619906723812061, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:33<03:09, 3.86s/it] 91%|█████████ | 472/520 [29:36<03:04, 3.85s/it] {'loss': 1.2887, 'grad_norm': 0.0005177563799438895, 'learning_rate': 0.015549517974850723, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:36<03:04, 3.85s/it] 91%|█████████ | 473/520 [29:40<03:00, 3.85s/it] {'loss': 1.3629, 'grad_norm': 0.0004804202251523636, 'learning_rate': 0.014912963496403675, 'epoch': 0.91} + 91%|█████████ | 473/520 [29:40<03:00, 3.85s/it] 91%|█████████ | 474/520 [29:44<02:56, 3.84s/it] {'loss': 1.4935, 'grad_norm': 0.0004866360470752661, 'learning_rate': 0.014289428535554281, 'epoch': 0.91} + 91%|█████████ | 474/520 [29:44<02:56, 3.84s/it] 91%|█████████▏| 475/520 [29:48<02:53, 3.85s/it] {'loss': 1.3859, 'grad_norm': 0.00046332669179665215, 'learning_rate': 0.013678937319215177, 'epoch': 0.91} + 91%|█████████▏| 475/520 [29:48<02:53, 3.85s/it] 92%|█████████▏| 476/520 [29:52<02:49, 3.85s/it] {'loss': 1.3357, 'grad_norm': 0.0004848184519498144, 'learning_rate': 0.01308151356749579, 'epoch': 0.92} + 92%|█████████▏| 476/520 [29:52<02:49, 3.85s/it] 92%|█████████▏| 477/520 [29:56<02:45, 3.85s/it] {'loss': 1.3359, 'grad_norm': 0.0004334510028873717, 'learning_rate': 0.012497180492780318, 'epoch': 0.92} + 92%|█████████▏| 477/520 [29:56<02:45, 3.85s/it] 92%|█████████▏| 478/520 [29:59<02:42, 3.86s/it] {'loss': 1.2907, 'grad_norm': 0.0005487649638666112, 'learning_rate': 0.01192596079882613, 'epoch': 0.92} + 92%|█████████▏| 478/520 [29:59<02:42, 3.86s/it] 92%|█████████▏| 479/520 [30:03<02:38, 3.86s/it] {'loss': 1.5064, 'grad_norm': 0.0004181333521648994, 'learning_rate': 0.01136787667988136, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:03<02:38, 3.86s/it] 92%|█████████▏| 480/520 [30:07<02:34, 3.86s/it] {'loss': 1.4848, 'grad_norm': 0.00042040316205798603, 'learning_rate': 0.010822949819822751, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:07<02:34, 3.86s/it] 92%|█████████▎| 481/520 [30:11<02:30, 3.87s/it] {'loss': 1.5201, 'grad_norm': 0.0004086547199928083, 'learning_rate': 0.010291201391313164, 'epoch': 0.93} + 
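For scale: assuming the same setup shown in the command lines elsewhere in this dump (8 ranks, per_device_train_batch_size 4, gradient_accumulation_steps 4), each of these 520 optimizer steps consumes an effective batch of 128 samples, consistent with train_data_ratio 0.1 of the 665k mix and with the roughly 34 samples/s reported in the summary at the end of this log. A back-of-envelope check (the trainer's own 33.976 figure uses the exact sample count, a touch under 520 * 128):

```python
# Back-of-envelope check against the summary line at the end of this log.
gpus, per_device_bs, grad_accum = 8, 4, 4             # from the logged command line
effective_batch = gpus * per_device_bs * grad_accum   # 128 samples per step
samples_seen = 520 * effective_batch                  # 66,560 ~ 0.1 of the 665k mix
throughput = samples_seen / 1958.097                  # 'train_runtime' from the summary
print(effective_batch, samples_seen, round(throughput, 2))  # 128 66560 33.99
```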
92%|█████████▎| 481/520 [30:11<02:30, 3.87s/it] 93%|█████████▎| 482/520 [30:15<02:26, 3.85s/it] {'loss': 1.5263, 'grad_norm': 0.0004357541528048414, 'learning_rate': 0.009772652054978924, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:15<02:26, 3.85s/it] 93%|█████████▎| 483/520 [30:19<02:20, 3.79s/it] {'loss': 1.3766, 'grad_norm': 0.0004100710676570031, 'learning_rate': 0.009267321958606828, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:19<02:20, 3.79s/it] 93%|█████████▎| 484/520 [30:22<02:15, 3.76s/it] {'loss': 1.372, 'grad_norm': 0.00040797556902088427, 'learning_rate': 0.008775230736361732, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:22<02:15, 3.76s/it] 93%|█████████▎| 485/520 [30:26<02:10, 3.73s/it] {'loss': 1.3261, 'grad_norm': 0.00036708090693801133, 'learning_rate': 0.008296397508023322, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:26<02:10, 3.73s/it] 93%|█████████▎| 486/520 [30:30<02:06, 3.72s/it] {'loss': 1.4389, 'grad_norm': 0.0004032942184324571, 'learning_rate': 0.00783084087824341, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:30<02:06, 3.72s/it] 94%|█████████▎| 487/520 [30:33<02:02, 3.71s/it] {'loss': 1.2866, 'grad_norm': 0.00037613470663085813, 'learning_rate': 0.007378578935823071, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:33<02:02, 3.71s/it] 94%|█████████▍| 488/520 [30:37<01:58, 3.70s/it] {'loss': 1.2341, 'grad_norm': 0.00048614257062171113, 'learning_rate': 0.00693962925300966, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:37<01:58, 3.70s/it] 94%|█████████▍| 489/520 [30:41<01:54, 3.69s/it] {'loss': 1.502, 'grad_norm': 0.0003445558874333066, 'learning_rate': 0.006514008884814321, 'epoch': 0.94} + 94%|█████████▍| 489/520 [30:41<01:54, 3.69s/it] 94%|█████████▍| 490/520 [30:44<01:50, 3.68s/it] {'loss': 1.3695, 'grad_norm': 0.00045039464658706363, 'learning_rate': 0.006101734368349104, 'epoch': 0.94} + 94%|█████████▍| 490/520 [30:44<01:50, 3.68s/it] 94%|█████████▍| 491/520 [30:48<01:46, 3.69s/it] {'loss': 1.3099, 'grad_norm': 0.00044865818372334003, 'learning_rate': 0.005702821722184536, 'epoch': 0.94} + 94%|█████████▍| 491/520 [30:48<01:46, 3.69s/it] 95%|█████████▍| 492/520 [30:52<01:43, 3.68s/it] {'loss': 1.4469, 'grad_norm': 0.0005384749420020181, 'learning_rate': 0.005317286445727192, 'epoch': 0.95} + 95%|█████████▍| 492/520 [30:52<01:43, 3.68s/it] 95%|█████████▍| 493/520 [30:55<01:39, 3.68s/it] {'loss': 1.6059, 'grad_norm': 0.0004783354766994064, 'learning_rate': 0.00494514351861744, 'epoch': 0.95} + 95%|█████████▍| 493/520 [30:55<01:39, 3.68s/it] 95%|█████████▌| 494/520 [30:59<01:36, 3.73s/it] {'loss': 1.3764, 'grad_norm': 0.00036048105713620105, 'learning_rate': 0.004586407400147618, 'epoch': 0.95} + 95%|█████████▌| 494/520 [30:59<01:36, 3.73s/it] 95%|█████████▌| 495/520 [31:03<01:34, 3.77s/it] {'loss': 1.3252, 'grad_norm': 0.00033511518724935874, 'learning_rate': 0.00424109202869985, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:03<01:34, 3.77s/it] 95%|█████████▌| 496/520 [31:07<01:31, 3.83s/it] {'loss': 1.2672, 'grad_norm': 0.0005673396006473479, 'learning_rate': 0.003909210821205017, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:07<01:31, 3.83s/it] 96%|█████████▌| 497/520 [31:11<01:28, 3.86s/it] {'loss': 1.4269, 'grad_norm': 0.00036036851311765363, 'learning_rate': 0.0035907766726209042, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:11<01:28, 3.86s/it] 96%|█████████▌| 498/520 [31:15<01:25, 3.87s/it] {'loss': 1.3381, 'grad_norm': 0.0004660161085895081, 'learning_rate': 0.0032858019554315165, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:15<01:25, 3.87s/it] 96%|█████████▌| 
499/520 [31:19<01:21, 3.88s/it] {'loss': 1.5799, 'grad_norm': 0.00033687001451015595, 'learning_rate': 0.002994298519166366, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:19<01:21, 3.88s/it] 96%|█████████▌| 500/520 [31:23<01:17, 3.88s/it] {'loss': 1.4657, 'grad_norm': 0.0005507650388556847, 'learning_rate': 0.0027162776899397776, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:23<01:17, 3.88s/it] 96%|█████████▋| 501/520 [31:27<01:13, 3.88s/it] {'loss': 1.4903, 'grad_norm': 0.0006299644629064589, 'learning_rate': 0.0024517502700111323, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:27<01:13, 3.88s/it] 97%|█████████▋| 502/520 [31:30<01:09, 3.88s/it] {'loss': 1.3651, 'grad_norm': 0.000507355766476378, 'learning_rate': 0.0022007265373650883, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:30<01:09, 3.88s/it] 97%|█████████▋| 503/520 [31:34<01:05, 3.88s/it] {'loss': 1.4501, 'grad_norm': 0.00041678428279019926, 'learning_rate': 0.0019632162453120825, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:34<01:05, 3.88s/it] 97%|█████████▋| 504/520 [31:38<01:02, 3.90s/it] {'loss': 1.3642, 'grad_norm': 0.0003923734507557975, 'learning_rate': 0.0017392286221095065, 'epoch': 0.97} + 97%|█████████▋| 504/520 [31:38<01:02, 3.90s/it] 97%|█████████▋| 505/520 [31:42<00:57, 3.85s/it] {'loss': 1.4188, 'grad_norm': 0.0004324399525459988, 'learning_rate': 0.0015287723706031652, 'epoch': 0.97} + 97%|█████████▋| 505/520 [31:42<00:57, 3.85s/it] 97%|█████████▋| 506/520 [31:46<00:52, 3.78s/it] {'loss': 1.318, 'grad_norm': 0.00044766367706165354, 'learning_rate': 0.001331855667889059, 'epoch': 0.97} + 97%|█████████▋| 506/520 [31:46<00:52, 3.78s/it] 98%|█████████▊| 507/520 [31:49<00:48, 3.75s/it] {'loss': 1.6277, 'grad_norm': 0.00048354000512234793, 'learning_rate': 0.001148486164995721, 'epoch': 0.97} + 98%|█████████▊| 507/520 [31:49<00:48, 3.75s/it] 98%|█████████▊| 508/520 [31:53<00:44, 3.72s/it] {'loss': 1.4497, 'grad_norm': 0.00040181263876956495, 'learning_rate': 0.0009786709865869548, 'epoch': 0.98} + 98%|█████████▊| 508/520 [31:53<00:44, 3.72s/it] 98%|█████████▊| 509/520 [31:57<00:40, 3.72s/it] {'loss': 1.4092, 'grad_norm': 0.0005770044681795369, 'learning_rate': 0.000822416730684894, 'epoch': 0.98} + 98%|█████████▊| 509/520 [31:57<00:40, 3.72s/it] 98%|█████████▊| 510/520 [32:00<00:37, 3.75s/it] {'loss': 1.3675, 'grad_norm': 0.00039939816969269075, 'learning_rate': 0.0006797294684138533, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:00<00:37, 3.75s/it] 98%|█████████▊| 511/520 [32:04<00:33, 3.73s/it] {'loss': 1.3274, 'grad_norm': 0.0005299643446557271, 'learning_rate': 0.0005506147437641883, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:04<00:33, 3.73s/it] 98%|█████████▊| 512/520 [32:08<00:29, 3.70s/it] {'loss': 1.2147, 'grad_norm': 0.0004676263574804271, 'learning_rate': 0.0004350775733771794, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:08<00:29, 3.70s/it] 99%|█████████▊| 513/520 [32:11<00:25, 3.70s/it] {'loss': 1.4213, 'grad_norm': 0.0004615284696070277, 'learning_rate': 0.0003331224463497706, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:11<00:25, 3.70s/it] 99%|█████████▉| 514/520 [32:15<00:22, 3.68s/it] {'loss': 1.4079, 'grad_norm': 0.0004454686577089654, 'learning_rate': 0.0002447533240604871, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:15<00:22, 3.68s/it] 99%|█████████▉| 515/520 [32:19<00:18, 3.68s/it] {'loss': 1.4699, 'grad_norm': 0.0005122063053658441, 'learning_rate': 0.0001699736400153251, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:19<00:18, 3.68s/it] 99%|█████████▉| 516/520 [32:22<00:14, 3.69s/it] {'loss': 1.3383, 
'grad_norm': 0.0005447953125687775, 'learning_rate': 0.00010878629971431407, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:22<00:14, 3.69s/it] 99%|█████████▉| 517/520 [32:26<00:10, 3.66s/it] {'loss': 1.5323, 'grad_norm': 0.0004815109388080205, 'learning_rate': 6.11936805387514e-05, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:26<00:10, 3.66s/it] 100%|█████████▉| 518/520 [32:30<00:07, 3.64s/it] {'loss': 1.3761, 'grad_norm': 0.0003716498558012633, 'learning_rate': 2.7197631658798513e-05, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:30<00:07, 3.64s/it] 100%|█████████▉| 519/520 [32:33<00:03, 3.63s/it] {'loss': 1.4996, 'grad_norm': 0.0006280888281314827, 'learning_rate': 6.799473961632829e-06, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:33<00:03, 3.63s/it] 100%|██████████| 520/520 [32:38<00:00, 3.84s/it] {'loss': 1.5622, 'grad_norm': 0.0005740189716738331, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [32:38<00:00, 3.84s/it] {'train_runtime': 1958.097, 'train_samples_per_second': 33.976, 'train_steps_per_second': 0.266, 'train_loss': 1.468403018437899, 'epoch': 1.0} + 100%|██████████| 520/520 [32:38<00:00, 3.84s/it] 100%|██████████| 520/520 [32:38<00:00, 3.77s/it] +[2025-10-09 10:22:10,601] [INFO] [launch.py:348:main] Process 1130317 exits successfully. +[2025-10-09 10:22:10,602] [INFO] [launch.py:348:main] Process 1130313 exits successfully. +[2025-10-09 10:22:11,603] [INFO] [launch.py:348:main] Process 1130316 exits successfully. +[2025-10-09 10:22:11,604] [INFO] [launch.py:348:main] Process 1130315 exits successfully. +[2025-10-09 10:22:11,604] [INFO] [launch.py:348:main] Process 1130312 exits successfully. +[2025-10-09 10:22:11,604] [INFO] [launch.py:348:main] Process 1130311 exits successfully. +[2025-10-09 10:22:11,605] [INFO] [launch.py:348:main] Process 1130314 exits successfully. +[2025-10-09 10:22:15,609] [INFO] [launch.py:348:main] Process 1130310 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_7e-1_connector-3.0_0.5_7e-1_ablation_20251009_094325.log +Timestamp: 2025-10-09 10:22:18 +===================================== diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251009_065816.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251009_065816.log new file mode 100644 index 0000000000000000000000000000000000000000..cf7886740656ca382df98c2de207bc60e74b50e4 --- /dev/null +++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251009_065816.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251009_065816.log +Timestamp: 2025-10-09 06:58:16 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 06:58:18,964] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:58:22,099] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 06:58:22,100] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 9 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 9 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 06:58:24,754] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 06:58:25,792] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 06:58:25,792] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 06:58:25,792] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 06:58:25,792] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 06:58:25,792] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 06:58:25,792] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 06:58:25,792] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 06:58:25,794] [INFO] [launch.py:253:main] process 867091 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:58:25,796] [INFO] [launch.py:253:main] process 867092 spawned with command: 
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:58:25,798] [INFO] [launch.py:253:main] process 867093 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', 
'/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:58:25,800] [INFO] [launch.py:253:main] process 867094 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', 
'--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:58:25,802] [INFO] [launch.py:253:main] process 867095 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:58:25,804] [INFO] [launch.py:253:main] process 867096 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', 
'/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:58:25,806] [INFO] [launch.py:253:main] process 867097 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', 
'1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 06:58:25,808] [INFO] [launch.py:253:main] process 867098 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', 
'--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you.
+ import pynvml # type: ignore[import]
+[2025-10-09 06:58:32,439] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:58:32,755] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:58:32,757] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:58:32,837] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:58:32,837] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:58:32,838] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:58:32,838] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:58:32,838] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect)
+[2025-10-09 06:58:32,844] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 06:58:33,154] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 06:58:33,162] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 06:58:33,162] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
+[2025-10-09 06:58:33,235] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 06:58:33,239] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 06:58:33,248] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 06:58:33,248] [INFO] [comm.py:637:init_distributed] cdb=None
+[2025-10-09 06:58:33,249] [INFO] [comm.py:637:init_distributed] cdb=None
+Apply masks for the following modules: ['llm', 'connector']
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`.
+ warnings.warn(
+{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}}
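The dict printed above is the per-module masking configuration for this run. As a quick aid for readers of this log (not part of the training code), the sketch below restates that dict as a Python literal and mechanically checks that every module named in `--mask_model` has a matching entry; `logged_cfg` and `mask_model` are names introduced here for illustration only.

```python
from pprint import pprint

# Restates the masking-related fields of the config dict logged above.
logged_cfg = {
    "llm": {
        "model_name_or_path": "Qwen/Qwen2.5-0.5B",
        "subnet_mode": "both",
        "subnet_type": "None",
        "mask_type": "soft",
        "temperature_attn": 0.5,
        "temperature_mlp": 0.5,
        "masked_layers": "all",
        "backward_type": "normal",
    },
    "vision_tower": {"model_name_or_path": "google/siglip-so400m-patch14-384"},
    "connector": {
        "connector_type": "mlp2x_gelu",
        "subnet_type": "global",
        "mask_type": "soft",
        "temperature": 0.5,
        "backward_type": "normal",
    },
}

# From the "Apply masks for the following modules" line above.
mask_model = ["llm", "connector"]
for module in mask_model:
    assert module in logged_cfg, f"missing config for masked module {module!r}"
pprint({m: logged_cfg[m] for m in mask_model})
```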
+TinyLlavaConfig {
+  "backward_type_connector": "normal",
+  "cache_dir": null,
+  "connector_type": "mlp2x_gelu",
+  "hidden_size": 896,
+  "ignore_index": -100,
+  "image_aspect_ratio": "square",
+  "image_token_index": -200,
+  "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "mask_model": [
+    "llm",
+    "connector"
+  ],
+  "mask_type_connector": "soft",
+  "model_type": "tinyllava",
+  "num_queries": 128,
+  "num_resampler_layers": 3,
+  "pad_token": null,
+  "resampler_hidden_size": 768,
+  "sparsity_connector": null,
+  "subnet_type_connector": "global",
+  "temperature_connector": 0.5,
+  "text_config": {
+    "_name_or_path": "Qwen/Qwen2.5-0.5B",
+    "architectures": [
+      "Qwen2ForCausalLM"
+    ],
+    "backward_type": "normal",
+    "bos_token_id": 151643,
+    "eos_token_id": 151643,
+    "hidden_size": 896,
+    "intermediate_size": 4864,
+    "mask_type": "soft",
+    "masked_layers": "all",
+    "max_position_embeddings": 32768,
+    "max_window_layers": 24,
+    "model_type": "qwen2",
+    "num_attention_heads": 14,
+    "num_hidden_layers": 24,
+    "num_key_value_heads": 2,
+    "rope_theta": 1000000.0,
+    "sliding_window": 32768,
+    "subnet_mode": "both",
+    "subnet_type": "None",
+    "temperature_attn": 0.5,
+    "temperature_mlp": 0.5,
+    "tie_word_embeddings": true,
+    "torch_dtype": "bfloat16",
+    "use_mrope": false,
+    "use_sliding_window": false,
+    "vocab_size": 151936
+  },
+  "threshold_connector": null,
+  "tokenizer_model_max_length": 2048,
+  "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B",
+  "tokenizer_padding_side": "right",
+  "tokenizer_use_fast": false,
+  "transformers_version": "4.40.1",
+  "tune_type_connector": "frozen",
+  "tune_type_llm": "frozen",
+  "tune_type_vision_tower": "frozen",
+  "tune_vision_tower_from_layer": -1,
+  "use_cache": false,
+  "vision_config": {
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 384,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_name_or_path": "google/siglip-so400m-patch14-384",
+    "model_name_or_path2": "",
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_hidden_layers": 27,
+    "patch_size": 14
+  },
+  "vision_feature_layer": -2,
+  "vision_feature_select_strategy": "patch",
+  "vision_hidden_size": 1152,
+  "vision_model_name_or_path": "google/siglip-so400m-patch14-384",
+  "vision_model_name_or_path2": "",
+  "vocab_size": 151936
+}
+
+Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage()
+ return self.fget.__get__(instance, owner)()
+You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`.
+ywang29-vrdb-test1-worker-0:867091:867091 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:867091:867091 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:867091:867091 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so)
+ywang29-vrdb-test1-worker-0:867091:867091 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so
+ywang29-vrdb-test1-worker-0:867091:867091 [0] NCCL INFO NET/Plugin: Using internal network plugin.
+ywang29-vrdb-test1-worker-0:867091:867091 [0] NCCL INFO cudaDriverVersion 12040
+NCCL version 2.21.5+cuda12.1
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO NET/IB : No device found.
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0>
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Using non-device net plugin version 0
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Using network Socket
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO ncclCommInitRank comm 0x5593fbcb9d30 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x7b5af781b5d85f3d - Init START
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO ncclCommInitRank comm 0x55697b862c20 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x7b5af781b5d85f3d - Init START
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO ncclCommInitRank comm 0x5584f9bc8210 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x7b5af781b5d85f3d - Init START
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO ncclCommInitRank comm 0x5648be8c3da0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x7b5af781b5d85f3d - Init START
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO ncclCommInitRank comm 0x5605446a0a60 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x7b5af781b5d85f3d - Init START
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO ncclCommInitRank comm 0x56522a3b7590 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x7b5af781b5d85f3d - Init START
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO ncclCommInitRank comm 0x5602f6c00250 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x7b5af781b5d85f3d - Init START
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO ncclCommInitRank comm 0x563bd22380d0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x7b5af781b5d85f3d - Init START
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO NVLS multicast support is not available on dev 4
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO NVLS multicast support is not available on dev 1
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO NVLS multicast support is not available on dev 6
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO NVLS multicast support is not available on dev 2
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO NVLS multicast support is not available on dev 3
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO NVLS multicast support is not available on dev 0
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO NVLS multicast support is not available on dev 7
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO NVLS multicast support is not available on dev 5
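The lines above record DeepSpeed bringing up torch.distributed with the NCCL backend, one process per GPU on a single node with 8 ranks. As a rough illustration only (this is not the DeepSpeed launcher code, just a minimal sketch of the calls such a launch reduces to), the snippet below shows the conventional per-worker initialization; the `RANK`/`WORLD_SIZE`/`LOCAL_RANK`/`MASTER_ADDR`/`MASTER_PORT` environment variables are what PyTorch-style launchers export, assumed here rather than taken from this log.

```python
import os

import torch
import torch.distributed as dist


def init_distributed() -> int:
    """Join the NCCL process group using launcher-provided env vars."""
    rank = int(os.environ.get("RANK", "0"))
    world_size = int(os.environ.get("WORLD_SIZE", "1"))
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    # Bind this process to its GPU before creating NCCL communicators.
    torch.cuda.set_device(local_rank)
    # Uses the default env:// rendezvous; the launcher must have set
    # MASTER_ADDR and MASTER_PORT for all workers.
    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)
    return local_rank


if __name__ == "__main__":
    local_rank = init_distributed()
    print(f"rank {dist.get_rank()}/{dist.get_world_size()} on cuda:{local_rank}")
```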
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO comm 0x5605446a0a60 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO comm 0x5593fbcb9d30 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO comm 0x563bd22380d0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO comm 0x55697b862c20 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO comm 0x5648be8c3da0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO comm 0x5584f9bc8210 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO comm 0x5602f6c00250 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO comm 0x56522a3b7590 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO P2P Chunksize set to 524288
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read
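The ring and tree dumps above fully determine this run's communication topology: every one of the 24 channels uses the same 8-GPU ring (0 through 7), and each rank's tree degenerates to a chain, with the parent at rank - 1 and the child at rank + 1 (-1 marking "none"). A small illustrative sketch, introduced here rather than taken from NCCL, reconstructs those relations from the rank alone so the per-rank lines can be cross-checked:

```python
NRANKS = 8  # one process per GPU on a single node, as logged above


def ring_next(rank: int) -> int:
    """Next hop on the ring, e.g. 7 -> 0, matching '7[7] -> 0[0]'."""
    return (rank + 1) % NRANKS


def chain_parent(rank: int) -> int:
    """Tree parent; -1 marks 'no parent', as in the rank-0 Trees line."""
    return rank - 1 if rank > 0 else -1


def chain_child(rank: int) -> int:
    """Tree child; -1 marks 'no child', as in the rank-7 Trees line."""
    return rank + 1 if rank < NRANKS - 1 else -1


for r in range(NRANKS):
    # Mirrors the "child/-1/-1->rank->parent" notation used by NCCL's dump.
    print(f"rank {r}: ring -> {ring_next(r)}, "
          f"tree {chain_child(r)}/-1/-1->{r}->{chain_parent(r)}")
```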
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Connected all rings
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read
+ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:867095:868695 [4] NCCL INFO ncclCommInitRank comm 0x5648be8c3da0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0x7b5af781b5d85f3d - Init COMPLETE +ywang29-vrdb-test1-worker-0:867098:868694 [7] NCCL INFO ncclCommInitRank comm 0x5605446a0a60 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0x7b5af781b5d85f3d - Init COMPLETE +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:867096:868697 [5] NCCL INFO ncclCommInitRank comm 0x55697b862c20 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0x7b5af781b5d85f3d - Init COMPLETE +ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:867094:868698 [3] NCCL INFO ncclCommInitRank comm 0x5593fbcb9d30 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0x7b5af781b5d85f3d - Init COMPLETE +ywang29-vrdb-test1-worker-0:867092:868688 [1] NCCL INFO ncclCommInitRank comm 0x56522a3b7590 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0x7b5af781b5d85f3d - Init COMPLETE +ywang29-vrdb-test1-worker-0:867091:868675 [0] NCCL INFO ncclCommInitRank comm 0x5602f6c00250 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0x7b5af781b5d85f3d - Init COMPLETE +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:867097:868696 [6] NCCL INFO ncclCommInitRank comm 0x563bd22380d0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0x7b5af781b5d85f3d - Init COMPLETE +ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:867093:868681 [2] NCCL INFO ncclCommInitRank comm 0x5584f9bc8210 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0x7b5af781b5d85f3d - Init COMPLETE
+[2025-10-09 06:59:20,410] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores',
'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 
'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 
'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 
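This warning is expected rather than a sign of a broken checkpoint: the run wraps every masked projection in a supermask layer that carries a learnable `scores` tensor (one mask logit per weight entry), and those tensors do not exist in the pretrained checkpoint, so Transformers reports them as newly initialized. Each of the eight DeepSpeed ranks builds the model, which is why the message recurs once per process. A minimal sketch of the idea in PyTorch, assuming a sigmoid soft mask with this run's temperature of 0.3 (the class name `SupermaskLinearSparsity_SoftForward_Normal` appears in this log, but the implementation below is illustrative, not the repository's actual code):

import torch
import torch.nn as nn
import torch.nn.functional as F

class SupermaskLinearSketch(nn.Linear):
    """Linear layer whose frozen weights are gated by trained soft masks."""

    def __init__(self, in_features, out_features, bias=True, temperature=0.3):
        super().__init__(in_features, out_features, bias=bias)
        # One mask logit per weight entry. This is the `scores` tensor that the
        # pretrained checkpoint lacks, hence the "newly initialized" warning.
        self.scores = nn.Parameter(torch.full((out_features, in_features), 3.0))
        self.temperature = temperature
        self.weight.requires_grad = False  # only the mask logits are trained

    def forward(self, x):
        # Soft forward: every weight is scaled by a mask value in (0, 1).
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)

The init value 3.0 mirrors the "Pre-training init ... Mean=3.000000" lines later in this log; with temperature 0.3 it gives sigmoid(3.0 / 0.3) = sigmoid(10) ≈ 0.99995, i.e. the masks start essentially fully open and training begins from the dense network.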
+loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model
+[2025-10-09 06:59:22,174] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B
+Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower
+Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin...
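The DeepSpeed ZeRO-3 initializer's summary line above counts the model as 907 parameter tensors totaling roughly 1.42B elements, i.e. the base weights plus all of the added mask scores. The same two tallies can be reproduced for any torch module; a small sketch, where `model` is assumed to be the assembled TinyLlavaForConditionalGeneration:

import torch.nn as nn

def count_params(model: nn.Module) -> tuple[int, int]:
    """Return (number of parameter tensors, total number of elements)."""
    params = list(model.parameters())
    return len(params), sum(p.numel() for p in params)

# n_tensors, n_elems = count_params(model)
# This run reports n_tensors = 907 and n_elems ≈ 1.42e9.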
+TinyLlavaForConditionalGeneration(
+  (language_model): Qwen2ForCausalLM(
+    (model): Qwen2Model(
+      (embed_tokens): Embedding(151936, 896)
+      (layers): ModuleList(
+        (0-23): 24 x Qwen2DecoderLayer(
+          (self_attn): Qwen2FlashAttention2(
+            (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+            (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True)
+            (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False)
+            (rotary_emb): Qwen2RotaryEmbedding()
+          )
+          (mlp): Qwen2MLP(
+            (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False)
+            (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False)
+            (act_fn): SiLU()
+          )
+          (input_layernorm): Qwen2RMSNorm()
+          (post_attention_layernorm): Qwen2RMSNorm()
+        )
+      )
+      (norm): Qwen2RMSNorm()
+    )
+    (lm_head): Linear(in_features=896, out_features=151936, bias=False)
+  )
+  (vision_tower): SIGLIPVisionTower(
+    (_vision_tower): SiglipVisionModel(
+      (vision_model): SiglipVisionTransformer(
+        (embeddings): SiglipVisionEmbeddings(
+          (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid)
+          (position_embedding): Embedding(729, 1152)
+        )
+        (encoder): SiglipEncoder(
+          (layers): ModuleList(
+            (0-26): 27 x SiglipEncoderLayer(
+              (self_attn): SiglipAttention(
+                (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+              )
+              (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+              (mlp): SiglipMLP(
+                (activation_fn): PytorchGELUTanh()
+                (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+              )
+              (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+            )
+          )
+        )
+        (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        (head): SiglipMultiheadAttentionPoolingHead(
+          (attention): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True)
+          )
+          (layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+          (mlp): SiglipMLP(
+            (activation_fn): PytorchGELUTanh()
+            (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+            (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+          )
+        )
+      )
+    )
+  )
+  (connector): MLPConnector(
+    (_connector): Sequential(
+      (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True)
+      (1): GELU(approximate='none')
+      (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True)
+    )
+  )
+)
+Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970
+Randomly sampled 66529 training samples (10.0% of 665298 total samples)
+2025-10-09 06:59:35,591 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568
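Both the subsample size and the trainable-parameter totals above are easy to sanity-check, and the per-tensor breakdown that follows is fully determined by the layer shapes in the architecture printout: every score tensor matches its weight matrix, so q_proj/o_proj contribute 896 x 896 = 802,816 logits each, k_proj/v_proj 128 x 896 = 114,688 (the narrower KV width reflecting Qwen2.5-0.5B's grouped-query attention: 2 KV heads vs 14 query heads at 64 dims per head), and each MLP projection 896 x 4,864 = 4,358,144. A quick check in Python (counts taken from this log):

# Per decoder layer: q,o projections + k,v projections + gate,up,down MLP mats.
per_layer = 2 * (896 * 896) + 2 * (128 * 896) + 3 * (896 * 4864)
llm_scores = 24 * per_layer                          # 357,826,560 over 24 layers
connector_scores = 1152 * 896 + 896 * 896            # 1,032,192 + 802,816
assert llm_scores + connector_scores == 359_661_568  # "Total Trainable Parameters"
assert int(665_298 * 0.1) == 66_529                  # the sampled-data line above

With 66,529 samples and a global batch of 8 GPUs x 4 per-device x 4 accumulation steps = 128, one epoch is ceil(66529 / 128) = 520 optimizer steps, matching the "0/520" progress bar further down.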
+2025-10-09 06:59:35,595 | INFO: Trainable Parameters:
+language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 
4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 
parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 
parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters
+Parameter Offload: Total persistent parameters: 486464 in 403 params
+ 0%| | 0/520 [00:00<?, ?it/s]
+ywang29-vrdb-test1-worker-0:867094:873690 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:867091:873688 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:867094:873690 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:867091:873688 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:867093:873694 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:867093:873694 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:867091:873688 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:867091:873688 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:867092:873691 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:867097:873693 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:867096:873689 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 
[... NCCL initialization output condensed: ranks 0-7 on ywang29-vrdb-test1-worker-0 finish exchanging tree/ring topology over 24 channels (ring order 0 1 2 3 4 5 6 7, Trees of the form k+1/-1/-1->k->k-1, P2P chunksize 524288), connect every forward and reverse peer channel via P2P/CUMEM/read, report "Connected all rings" and "Connected all trees", log threadThresholds 8/8/64 | 64/8/64 | 512 | 512 with 24 coll channels, 24 collnet channels, 0 nvls channels, and 32 p2p channels (32 per peer), after which ncclCommInitRank completes for ranks 1-7 of comm 0xa2948f2a2a1de045 ...]
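Not taken from the log: a minimal smoke-test sketch for verifying that the same 8-rank NCCL communicator the lines above report can be built and used for a collective. It assumes PyTorch with the NCCL backend and a launch via `torchrun --nproc_per_node=8 check_nccl.py`; the filename and tensor values are made up for illustration.

```python
# check_nccl.py -- hypothetical smoke test, not part of this training run.
import os
import torch
import torch.distributed as dist

def main():
    local_rank = int(os.environ.get("LOCAL_RANK", 0))  # set by torchrun
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")            # same backend as the log
    rank = dist.get_rank()
    x = torch.ones(1, device="cuda") * rank
    dist.all_reduce(x, op=dist.ReduceOp.SUM)           # exercises the rings/trees above
    if rank == 0:
        print(f"all_reduce ok, sum={x.item()}")        # 0+1+...+7 == 28 for 8 ranks
    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```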
+ywang29-vrdb-test1-worker-0:867091:873688 [0] NCCL INFO ncclCommInitRank comm 0x7fcb0c06b1b0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xa2948f2a2a1de045 - Init COMPLETE
+ 0%| | 1/520 [00:12<1:48:34, 12.55s/it] {'loss': 2.0453, 'grad_norm': 0.0048339019471763224, 'learning_rate': 0.5625, 'epoch': 0.0}
[... duplicated tqdm progress-bar echoes removed; steps 2-12 condensed: loss 2.0549, 1.7172, 2.5692, then climbing sharply through 9.5123 (step 5), 14.6128, 18.3749, 17.0603, 15.243, 12.946, 21.9825 (step 11), 12.3227 (step 12), while the learning rate warms up from 1.125 to 6.75, grad_norm peaks at ~1.10 (step 7), and throughput settles from 12.55 s/it to ~4.1 s/it ...]
+[2025-10-09 07:00:41,706] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time
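Not part of the log: a sketch of the mitigation the DeepSpeed warning above suggests, i.e. flushing the CUDA allocator cache at the same point in the loop on every rank so no rank stalls alone under memory pressure. `engine` and `loader` are hypothetical stand-ins for the DeepSpeedEngine and DataLoader of this run, and FLUSH_EVERY is an invented interval; only `get_accelerator().empty_cache()` comes from the warning itself.

```python
# Hypothetical training-loop sketch; only the empty_cache() call is from the warning.
from deepspeed.accelerator import get_accelerator

FLUSH_EVERY = 50  # made-up interval; tune to how often the warning fires

def train(engine, loader):
    for step, batch in enumerate(loader):
        loss = engine(batch)      # forward through the DeepSpeed engine
        engine.backward(loss)     # DeepSpeed-managed backward
        engine.step()             # optimizer + ZeRO stage-3 step
        if step % FLUSH_EVERY == 0:
            get_accelerator().empty_cache()  # synchronized flush on all ranks
```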
[... steps 13-126 condensed (one {'loss', 'grad_norm', 'learning_rate', 'epoch'} record per step, each echoed by a duplicate progress bar): loss stays in a ~10.1-12.2 band with no sustained downward trend (e.g. 12.1731 at step 13, 10.0975 at step 98, 10.3835 at step 126); grad_norm decays from ~3.8e-3 to the 3e-5 to 1e-4 range; the learning rate finishes its warmup at 9.0 on step 16 and follows the cosine schedule down to 7.98299 by step 126; epoch advances from 0.03 to 0.24 at ~3.7-3.85 s/it; the log continues below ...]
| 126/520 [08:06<25:42, 3.91s/it] 24%|██▍ | 127/520 [08:10<25:18, 3.86s/it] {'loss': 11.3399, 'grad_norm': 3.821119278391471e-05, 'learning_rate': 7.9651630585766835, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:10<25:18, 3.86s/it] 25%|██▍ | 128/520 [08:14<25:00, 3.83s/it] {'loss': 11.0962, 'grad_norm': 3.687477737145975e-05, 'learning_rate': 7.947199994035401, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:14<25:00, 3.83s/it] 25%|██▍ | 129/520 [08:18<24:51, 3.82s/it] {'loss': 10.1297, 'grad_norm': 3.943520540253697e-05, 'learning_rate': 7.929102991513549, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:18<24:51, 3.82s/it] 25%|██▌ | 130/520 [08:21<24:34, 3.78s/it] {'loss': 11.0076, 'grad_norm': 3.096001481178657e-05, 'learning_rate': 7.910872754154538, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:21<24:34, 3.78s/it] 25%|██▌ | 131/520 [08:25<24:25, 3.77s/it] {'loss': 10.7244, 'grad_norm': 3.567002725599562e-05, 'learning_rate': 7.892509990278509, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:25<24:25, 3.77s/it] 25%|██▌ | 132/520 [08:29<24:32, 3.79s/it] {'loss': 11.2898, 'grad_norm': 3.217210502956981e-05, 'learning_rate': 7.874015413354804, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:29<24:32, 3.79s/it] 26%|██▌ | 133/520 [08:33<24:30, 3.80s/it] {'loss': 11.3124, 'grad_norm': 3.6000920129279004e-05, 'learning_rate': 7.8553897419742444, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:33<24:30, 3.80s/it] 26%|██▌ | 134/520 [08:37<24:30, 3.81s/it] {'loss': 10.915, 'grad_norm': 3.545047775362655e-05, 'learning_rate': 7.83663369982122, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:37<24:30, 3.81s/it] 26%|██▌ | 135/520 [08:40<24:21, 3.80s/it] {'loss': 10.9672, 'grad_norm': 3.998863505789345e-05, 'learning_rate': 7.817748015645558, 'epoch': 0.26} + 26%|██▌ | 135/520 [08:40<24:21, 3.80s/it] 26%|██▌ | 136/520 [08:44<24:04, 3.76s/it] {'loss': 10.5063, 'grad_norm': 3.8805316711868386e-05, 'learning_rate': 7.798733423234218, 'epoch': 0.26} + 26%|██▌ | 136/520 [08:44<24:04, 3.76s/it] 26%|██▋ | 137/520 [08:48<24:01, 3.76s/it] {'loss': 11.1054, 'grad_norm': 4.886959254041963e-05, 'learning_rate': 7.779590661382778, 'epoch': 0.26} + 26%|██▋ | 137/520 [08:48<24:01, 3.76s/it] 27%|██▋ | 138/520 [08:52<23:48, 3.74s/it] {'loss': 10.468, 'grad_norm': 4.216901696130001e-05, 'learning_rate': 7.760320473866727, 'epoch': 0.27} + 27%|██▋ | 138/520 [08:52<23:48, 3.74s/it] 27%|██▋ | 139/520 [08:55<23:40, 3.73s/it] {'loss': 10.7961, 'grad_norm': 4.370694916611772e-05, 'learning_rate': 7.74092360941257, 'epoch': 0.27} + 27%|██▋ | 139/520 [08:55<23:40, 3.73s/it] 27%|██▋ | 140/520 [08:59<23:37, 3.73s/it] {'loss': 10.8737, 'grad_norm': 3.590388000047951e-05, 'learning_rate': 7.721400821668733, 'epoch': 0.27} + 27%|██▋ | 140/520 [08:59<23:37, 3.73s/it] 27%|██▋ | 141/520 [09:03<23:38, 3.74s/it] {'loss': 10.5081, 'grad_norm': 3.013039383492039e-05, 'learning_rate': 7.7017528691762855, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:03<23:38, 3.74s/it] 27%|██▋ | 142/520 [09:06<23:26, 3.72s/it] {'loss': 10.7833, 'grad_norm': 2.675740263526317e-05, 'learning_rate': 7.681980515339464, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:06<23:26, 3.72s/it] 28%|██▊ | 143/520 [09:10<23:18, 3.71s/it] {'loss': 11.2417, 'grad_norm': 2.5665712077120558e-05, 'learning_rate': 7.6620845283960115, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:10<23:18, 3.71s/it] 28%|██▊ | 144/520 [09:14<23:09, 3.70s/it] {'loss': 10.6394, 'grad_norm': 3.3778661532898645e-05, 'learning_rate': 7.642065681387328, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:14<23:09, 3.70s/it] 28%|██▊ | 145/520 [09:18<23:10, 3.71s/it] {'loss': 10.8628, 'grad_norm': 
3.401809698741281e-05, 'learning_rate': 7.621924752128438, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:18<23:10, 3.71s/it] 28%|██▊ | 146/520 [09:21<23:01, 3.69s/it] {'loss': 11.1209, 'grad_norm': 2.8479985204588243e-05, 'learning_rate': 7.601662523177762, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:21<23:01, 3.69s/it] 28%|██▊ | 147/520 [09:25<23:03, 3.71s/it] {'loss': 10.5541, 'grad_norm': 4.849501600526615e-05, 'learning_rate': 7.581279781806721, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:25<23:03, 3.71s/it] 28%|██▊ | 148/520 [09:29<22:58, 3.70s/it] {'loss': 10.5482, 'grad_norm': 4.595110389965653e-05, 'learning_rate': 7.560777319969136, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:29<22:58, 3.70s/it] 29%|██▊ | 149/520 [09:32<23:00, 3.72s/it] {'loss': 10.9834, 'grad_norm': 3.89872344768023e-05, 'learning_rate': 7.540155934270471, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:32<23:00, 3.72s/it] 29%|██▉ | 150/520 [09:36<23:00, 3.73s/it] {'loss': 10.6095, 'grad_norm': 3.617138321833538e-05, 'learning_rate': 7.519416425936865, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:36<23:00, 3.73s/it] 29%|██▉ | 151/520 [09:40<22:59, 3.74s/it] {'loss': 10.6998, 'grad_norm': 3.4420245063359436e-05, 'learning_rate': 7.498559600784017, 'epoch': 0.29} + 29%|██▉ | 151/520 [09:40<22:59, 3.74s/it] 29%|██▉ | 152/520 [09:44<23:00, 3.75s/it] {'loss': 10.9988, 'grad_norm': 4.3424974927204935e-05, 'learning_rate': 7.477586269185868, 'epoch': 0.29} + 29%|██▉ | 152/520 [09:44<23:00, 3.75s/it] 29%|██▉ | 153/520 [09:47<23:02, 3.77s/it] {'loss': 10.5317, 'grad_norm': 3.323295108276432e-05, 'learning_rate': 7.456497246043113, 'epoch': 0.29} + 29%|██▉ | 153/520 [09:48<23:02, 3.77s/it] 30%|██▉ | 154/520 [09:51<22:53, 3.75s/it] {'loss': 10.5819, 'grad_norm': 6.805850667479804e-05, 'learning_rate': 7.435293350751545, 'epoch': 0.3} + 30%|██▉ | 154/520 [09:51<22:53, 3.75s/it] 30%|██▉ | 155/520 [09:55<23:02, 3.79s/it] {'loss': 11.171, 'grad_norm': 2.8452963816834416e-05, 'learning_rate': 7.413975407170216, 'epoch': 0.3} + 30%|██▉ | 155/520 [09:55<23:02, 3.79s/it] 30%|███ | 156/520 [09:59<23:08, 3.81s/it] {'loss': 11.0902, 'grad_norm': 2.4234646182833555e-05, 'learning_rate': 7.392544243589427, 'epoch': 0.3} + 30%|███ | 156/520 [09:59<23:08, 3.81s/it] 30%|███ | 157/520 [10:03<23:12, 3.84s/it] {'loss': 11.2428, 'grad_norm': 2.4771767885646904e-05, 'learning_rate': 7.371000692698539, 'epoch': 0.3} + 30%|███ | 157/520 [10:03<23:12, 3.84s/it] 30%|███ | 158/520 [10:07<23:09, 3.84s/it] {'loss': 10.5876, 'grad_norm': 4.733138853990739e-05, 'learning_rate': 7.34934559155363, 'epoch': 0.3} + 30%|███ | 158/520 [10:07<23:09, 3.84s/it] 31%|███ | 159/520 [10:11<23:09, 3.85s/it] {'loss': 10.5045, 'grad_norm': 2.6615212839941857e-05, 'learning_rate': 7.327579781544963, 'epoch': 0.31} + 31%|███ | 159/520 [10:11<23:09, 3.85s/it] 31%|███ | 160/520 [10:14<23:11, 3.87s/it] {'loss': 10.6728, 'grad_norm': 2.6573372862318608e-05, 'learning_rate': 7.305704108364301, 'epoch': 0.31} + 31%|███ | 160/520 [10:14<23:11, 3.87s/it] 31%|███ | 161/520 [10:18<23:17, 3.89s/it] {'loss': 10.7671, 'grad_norm': 2.475444766200767e-05, 'learning_rate': 7.283719421972047, 'epoch': 0.31} + 31%|███ | 161/520 [10:18<23:17, 3.89s/it] 31%|███ | 162/520 [10:22<23:10, 3.88s/it] {'loss': 11.1447, 'grad_norm': 2.639451947738052e-05, 'learning_rate': 7.261626576564214, 'epoch': 0.31} + 31%|███ | 162/520 [10:22<23:10, 3.88s/it] 31%|███▏ | 163/520 [10:26<23:10, 3.90s/it] {'loss': 10.9372, 'grad_norm': 2.8634431295735502e-05, 'learning_rate': 7.239426430539243, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:26<23:10, 
3.90s/it] 32%|███▏ | 164/520 [10:30<23:21, 3.94s/it] {'loss': 10.8198, 'grad_norm': 2.673917172061674e-05, 'learning_rate': 7.217119846464648, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:30<23:21, 3.94s/it] 32%|███▏ | 165/520 [10:34<23:43, 4.01s/it] {'loss': 10.5207, 'grad_norm': 2.8995270071101092e-05, 'learning_rate': 7.194707691043502, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:34<23:43, 4.01s/it] 32%|███▏ | 166/520 [10:39<23:53, 4.05s/it] {'loss': 10.4937, 'grad_norm': 3.969561650387469e-05, 'learning_rate': 7.172190835080757, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:39<23:53, 4.05s/it] 32%|███▏ | 167/520 [10:43<24:03, 4.09s/it] {'loss': 10.8921, 'grad_norm': 3.4385568584554856e-05, 'learning_rate': 7.149570153449421, 'epoch': 0.32} + 32%|███▏ | 167/520 [10:43<24:03, 4.09s/it] 32%|███▏ | 168/520 [10:47<24:06, 4.11s/it] {'loss': 10.5097, 'grad_norm': 4.650399388095978e-05, 'learning_rate': 7.126846525056555, 'epoch': 0.32} + 32%|███▏ | 168/520 [10:47<24:06, 4.11s/it] 32%|███▎ | 169/520 [10:51<23:51, 4.08s/it] {'loss': 10.8014, 'grad_norm': 6.42942451988021e-05, 'learning_rate': 7.104020832809127, 'epoch': 0.33} + 32%|███▎ | 169/520 [10:51<23:51, 4.08s/it] 33%|███▎ | 170/520 [10:55<23:35, 4.04s/it] {'loss': 10.7403, 'grad_norm': 3.706465975783043e-05, 'learning_rate': 7.081093963579708, 'epoch': 0.33} + 33%|███▎ | 170/520 [10:55<23:35, 4.04s/it] 33%|███▎ | 171/520 [10:59<23:23, 4.02s/it] {'loss': 10.9139, 'grad_norm': 3.7334059748037686e-05, 'learning_rate': 7.058066808172016, 'epoch': 0.33} + 33%|███▎ | 171/520 [10:59<23:23, 4.02s/it] 33%|███▎ | 172/520 [11:03<23:04, 3.98s/it] {'loss': 10.3347, 'grad_norm': 3.061214851371433e-05, 'learning_rate': 7.034940261286299, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:03<23:04, 3.98s/it] 33%|███▎ | 173/520 [11:07<22:50, 3.95s/it] {'loss': 10.6835, 'grad_norm': 3.673446448804639e-05, 'learning_rate': 7.011715221484579, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:07<22:50, 3.95s/it] 33%|███▎ | 174/520 [11:11<22:43, 3.94s/it] {'loss': 11.1019, 'grad_norm': 3.241811192290814e-05, 'learning_rate': 6.988392591155727, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:11<22:43, 3.94s/it] 34%|███▎ | 175/520 [11:15<22:43, 3.95s/it] {'loss': 10.6795, 'grad_norm': 3.976072246734734e-05, 'learning_rate': 6.964973276480421, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:15<22:43, 3.95s/it] 34%|███▍ | 176/520 [11:19<22:54, 4.00s/it] {'loss': 11.1667, 'grad_norm': 2.1454659242480787e-05, 'learning_rate': 6.941458187395917, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:19<22:54, 4.00s/it] 34%|███▍ | 177/520 [11:23<22:46, 3.98s/it] {'loss': 11.0912, 'grad_norm': 2.378710657503025e-05, 'learning_rate': 6.917848237560708, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:23<22:46, 3.98s/it] 34%|███▍ | 178/520 [11:27<22:57, 4.03s/it] {'loss': 10.8252, 'grad_norm': 2.6223552564187907e-05, 'learning_rate': 6.894144344319015, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:27<22:57, 4.03s/it] 34%|███▍ | 179/520 [11:31<22:55, 4.03s/it] {'loss': 10.6363, 'grad_norm': 2.859435763146307e-05, 'learning_rate': 6.870347428665153, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:31<22:55, 4.03s/it] 35%|███▍ | 180/520 [11:35<23:03, 4.07s/it] {'loss': 10.9032, 'grad_norm': 1.8830442531659782e-05, 'learning_rate': 6.846458415207741, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:35<23:03, 4.07s/it] 35%|███▍ | 181/520 [11:39<23:07, 4.09s/it] {'loss': 10.274, 'grad_norm': 2.8492832109213992e-05, 'learning_rate': 6.82247823213378, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:39<23:07, 4.09s/it] 35%|███▌ | 182/520 [11:43<23:09, 4.11s/it] {'loss': 10.9941, 
'grad_norm': 2.999881292019698e-05, 'learning_rate': 6.798407811172586, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:43<23:09, 4.11s/it] 35%|███▌ | 183/520 [11:47<23:10, 4.13s/it] {'loss': 10.3981, 'grad_norm': 2.9629844377491494e-05, 'learning_rate': 6.774248087559589, 'epoch': 0.35} + 35%|███▌ | 183/520 [11:47<23:10, 4.13s/it] 35%|███▌ | 184/520 [11:51<23:06, 4.13s/it] {'loss': 10.8423, 'grad_norm': 3.445754989870153e-05, 'learning_rate': 6.75, 'epoch': 0.35} + 35%|███▌ | 184/520 [11:51<23:06, 4.13s/it] 36%|███▌ | 185/520 [11:56<23:04, 4.13s/it] {'loss': 10.8693, 'grad_norm': 3.542400189315843e-05, 'learning_rate': 6.725664490632333, 'epoch': 0.36} + 36%|███▌ | 185/520 [11:56<23:04, 4.13s/it] 36%|███▌ | 186/520 [12:00<23:04, 4.14s/it] {'loss': 10.6023, 'grad_norm': 3.7470073761964756e-05, 'learning_rate': 6.701242504991802, 'epoch': 0.36} + 36%|███▌ | 186/520 [12:00<23:04, 4.14s/it] 36%|███▌ | 187/520 [12:04<23:00, 4.14s/it] {'loss': 11.3438, 'grad_norm': 2.5048548897957436e-05, 'learning_rate': 6.6767349919735794, 'epoch': 0.36} + 36%|███▌ | 187/520 [12:04<23:00, 4.14s/it] 36%|███▌ | 188/520 [12:08<22:56, 4.15s/it] {'loss': 10.427, 'grad_norm': 3.761223841921656e-05, 'learning_rate': 6.652142903795932, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:08<22:56, 4.15s/it] 36%|███▋ | 189/520 [12:12<22:52, 4.15s/it] {'loss': 10.6257, 'grad_norm': 3.3143383554109715e-05, 'learning_rate': 6.627467195963222, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:12<22:52, 4.15s/it] 37%|███▋ | 190/520 [12:16<22:52, 4.16s/it] {'loss': 10.8994, 'grad_norm': 4.176626674162258e-05, 'learning_rate': 6.6027088272287795, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:16<22:52, 4.16s/it] 37%|███▋ | 191/520 [12:20<22:31, 4.11s/it] {'loss': 11.0913, 'grad_norm': 3.8552363997937084e-05, 'learning_rate': 6.577868759557654, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:20<22:31, 4.11s/it] 37%|███▋ | 192/520 [12:24<22:10, 4.06s/it] {'loss': 11.1414, 'grad_norm': 4.6222193580442385e-05, 'learning_rate': 6.552947958089233, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:24<22:10, 4.06s/it] 37%|███▋ | 193/520 [12:28<21:52, 4.01s/it] {'loss': 11.0865, 'grad_norm': 3.6666994111164546e-05, 'learning_rate': 6.5279473910997545, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:28<21:52, 4.01s/it] 37%|███▋ | 194/520 [12:32<21:36, 3.98s/it] {'loss': 10.5415, 'grad_norm': 2.383631280551719e-05, 'learning_rate': 6.502868029964665, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:32<21:36, 3.98s/it] 38%|███▊ | 195/520 [12:36<21:28, 3.96s/it] {'loss': 10.2341, 'grad_norm': 2.4205557228720383e-05, 'learning_rate': 6.477710849120903, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:36<21:28, 3.96s/it] 38%|███▊ | 196/520 [12:40<21:31, 3.99s/it] {'loss': 10.7993, 'grad_norm': 1.9645685551434623e-05, 'learning_rate': 6.452476826029011, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:40<21:31, 3.99s/it] 38%|███▊ | 197/520 [12:44<21:37, 4.02s/it] {'loss': 10.5337, 'grad_norm': 2.4503974609984102e-05, 'learning_rate': 6.427166941135182, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:44<21:37, 4.02s/it] 38%|███▊ | 198/520 [12:48<21:36, 4.03s/it] {'loss': 10.8713, 'grad_norm': 2.158161363098667e-05, 'learning_rate': 6.401782177833148, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:48<21:36, 4.03s/it] 38%|███▊ | 199/520 [12:52<21:34, 4.03s/it] {'loss': 11.0253, 'grad_norm': 1.724510633880462e-05, 'learning_rate': 6.376323522425977, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:52<21:34, 4.03s/it] 38%|███▊ | 200/520 [12:56<21:32, 4.04s/it] {'loss': 10.9539, 'grad_norm': 2.0486815417462346e-05, 'learning_rate': 6.350791964087753, 'epoch': 
0.38} + 38%|███▊ | 200/520 [12:56<21:32, 4.04s/it] 39%|███▊ | 201/520 [13:00<21:24, 4.03s/it] {'loss': 10.472, 'grad_norm': 2.2326959327991102e-05, 'learning_rate': 6.325188494825138, 'epoch': 0.39} + 39%|███▊ | 201/520 [13:00<21:24, 4.03s/it] 39%|███▉ | 202/520 [13:04<21:00, 3.96s/it] {'loss': 10.8271, 'grad_norm': 2.7118877097904363e-05, 'learning_rate': 6.299514109438833, 'epoch': 0.39} + 39%|███▉ | 202/520 [13:04<21:00, 3.96s/it] 39%|███▉ | 203/520 [13:08<20:43, 3.92s/it] {'loss': 10.609, 'grad_norm': 2.708297276774064e-05, 'learning_rate': 6.273769805484927, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:08<20:43, 3.92s/it] 39%|███▉ | 204/520 [13:12<20:31, 3.90s/it] {'loss': 10.6913, 'grad_norm': 3.12268061139716e-05, 'learning_rate': 6.247956583236126, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:12<20:31, 3.90s/it] 39%|███▉ | 205/520 [13:16<20:34, 3.92s/it] {'loss': 10.7336, 'grad_norm': 1.8568042667138586e-05, 'learning_rate': 6.222075445642904, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:16<20:34, 3.92s/it] 40%|███▉ | 206/520 [13:20<20:25, 3.90s/it] {'loss': 10.8022, 'grad_norm': 2.145340048094839e-05, 'learning_rate': 6.196127398294523, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:20<20:25, 3.90s/it] 40%|███▉ | 207/520 [13:24<20:16, 3.89s/it] {'loss': 10.5911, 'grad_norm': 2.6999249489232517e-05, 'learning_rate': 6.17011344937997, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:24<20:16, 3.89s/it] 40%|████ | 208/520 [13:27<20:07, 3.87s/it] {'loss': 11.1935, 'grad_norm': 2.8937967267949355e-05, 'learning_rate': 6.144034609648779, 'epoch': 0.4} + 40%|████ | 208/520 [13:27<20:07, 3.87s/it] 40%|████ | 209/520 [13:31<20:02, 3.87s/it] {'loss': 11.0481, 'grad_norm': 2.492192410662862e-05, 'learning_rate': 6.117891892371754, 'epoch': 0.4} + 40%|████ | 209/520 [13:31<20:02, 3.87s/it] 40%|████ | 210/520 [13:35<20:01, 3.88s/it] {'loss': 10.6857, 'grad_norm': 2.8542718637730045e-05, 'learning_rate': 6.091686313301616, 'epoch': 0.4} + 40%|████ | 210/520 [13:35<20:01, 3.88s/it] 41%|████ | 211/520 [13:39<19:59, 3.88s/it] {'loss': 10.8172, 'grad_norm': 3.214180678769463e-05, 'learning_rate': 6.065418890633522, 'epoch': 0.41} + 41%|████ | 211/520 [13:39<19:59, 3.88s/it] 41%|████ | 212/520 [13:43<19:49, 3.86s/it] {'loss': 10.0889, 'grad_norm': 4.00338547323061e-05, 'learning_rate': 6.03909064496551, 'epoch': 0.41} + 41%|████ | 212/520 [13:43<19:49, 3.86s/it] 41%|████ | 213/520 [13:47<19:47, 3.87s/it] {'loss': 11.4977, 'grad_norm': 1.843754385015699e-05, 'learning_rate': 6.012702599258839, 'epoch': 0.41} + 41%|████ | 213/520 [13:47<19:47, 3.87s/it] 41%|████ | 214/520 [13:50<19:33, 3.84s/it] {'loss': 10.7252, 'grad_norm': 2.1359003655147687e-05, 'learning_rate': 5.986255778798252, 'epoch': 0.41} + 41%|████ | 214/520 [13:50<19:33, 3.84s/it] 41%|████▏ | 215/520 [13:54<19:15, 3.79s/it] {'loss': 11.0768, 'grad_norm': 2.267529307798385e-05, 'learning_rate': 5.959751211152132, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:54<19:15, 3.79s/it] 42%|████▏ | 216/520 [13:58<19:03, 3.76s/it] {'loss': 10.8484, 'grad_norm': 3.393218282497025e-05, 'learning_rate': 5.933189926132581, 'epoch': 0.42} + 42%|████▏ | 216/520 [13:58<19:03, 3.76s/it] 42%|████▏ | 217/520 [14:02<18:51, 3.73s/it] {'loss': 10.6669, 'grad_norm': 3.433784698320188e-05, 'learning_rate': 5.906572955755401, 'epoch': 0.42} + 42%|████▏ | 217/520 [14:02<18:51, 3.73s/it] 42%|████▏ | 218/520 [14:05<18:46, 3.73s/it] {'loss': 11.3101, 'grad_norm': 1.888056763359256e-05, 'learning_rate': 5.879901334200005, 'epoch': 0.42} + 42%|████▏ | 218/520 [14:05<18:46, 3.73s/it] 42%|████▏ | 219/520 
[14:09<18:41, 3.72s/it] {'loss': 10.1822, 'grad_norm': 2.765298953602181e-05, 'learning_rate': 5.853176097769229, 'epoch': 0.42} + 42%|████▏ | 219/520 [14:09<18:41, 3.72s/it] 42%|████▏ | 220/520 [14:13<18:29, 3.70s/it] {'loss': 10.8857, 'grad_norm': 2.1423151775922533e-05, 'learning_rate': 5.826398284849069, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:13<18:29, 3.70s/it] 42%|████▎ | 221/520 [14:16<18:34, 3.73s/it] {'loss': 10.7589, 'grad_norm': 2.5720379095979693e-05, 'learning_rate': 5.799568935868335, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:16<18:34, 3.73s/it] 43%|████▎ | 222/520 [14:20<18:28, 3.72s/it] {'loss': 10.547, 'grad_norm': 3.003338888669511e-05, 'learning_rate': 5.772689093258224, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:20<18:28, 3.72s/it] 43%|████▎ | 223/520 [14:24<18:25, 3.72s/it] {'loss': 10.4353, 'grad_norm': 2.1581629031151462e-05, 'learning_rate': 5.745759801411822, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:24<18:25, 3.72s/it] 43%|████▎ | 224/520 [14:27<18:17, 3.71s/it] {'loss': 11.5178, 'grad_norm': 1.4480853694866198e-05, 'learning_rate': 5.718782106643523, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:27<18:17, 3.71s/it] 43%|████▎ | 225/520 [14:31<18:09, 3.69s/it] {'loss': 10.5233, 'grad_norm': 2.4908940054373207e-05, 'learning_rate': 5.691757057148372, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:31<18:09, 3.69s/it] 43%|████▎ | 226/520 [14:35<18:08, 3.70s/it] {'loss': 10.7455, 'grad_norm': 2.84547487344406e-05, 'learning_rate': 5.664685702961343, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:35<18:08, 3.70s/it] 44%|████▎ | 227/520 [14:39<18:03, 3.70s/it] {'loss': 10.6125, 'grad_norm': 3.9920120115761546e-05, 'learning_rate': 5.637569095916538, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:39<18:03, 3.70s/it] 44%|████▍ | 228/520 [14:42<18:01, 3.70s/it] {'loss': 11.4455, 'grad_norm': 5.264126297112782e-05, 'learning_rate': 5.610408289606321, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:42<18:01, 3.70s/it] 44%|████▍ | 229/520 [14:46<18:05, 3.73s/it] {'loss': 10.3945, 'grad_norm': 6.0957858873380666e-05, 'learning_rate': 5.583204339340379, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:46<18:05, 3.73s/it] 44%|████▍ | 230/520 [14:50<18:16, 3.78s/it] {'loss': 11.1481, 'grad_norm': 3.341397757815283e-05, 'learning_rate': 5.5559583021047185, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:50<18:16, 3.78s/it] 44%|████▍ | 231/520 [14:54<18:12, 3.78s/it] {'loss': 10.6704, 'grad_norm': 2.7659410281029905e-05, 'learning_rate': 5.528671236520603, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:54<18:12, 3.78s/it] 45%|████▍ | 232/520 [14:57<18:02, 3.76s/it] {'loss': 11.2169, 'grad_norm': 1.7244331674864714e-05, 'learning_rate': 5.5013442028034145, 'epoch': 0.45} + 45%|████▍ | 232/520 [14:57<18:02, 3.76s/it] 45%|████▍ | 233/520 [15:01<17:53, 3.74s/it] {'loss': 11.1974, 'grad_norm': 2.4681000667841873e-05, 'learning_rate': 5.473978262721463, 'epoch': 0.45} + 45%|████▍ | 233/520 [15:01<17:53, 3.74s/it] 45%|████▌ | 234/520 [15:05<17:49, 3.74s/it] {'loss': 10.6773, 'grad_norm': 2.4885778105615655e-05, 'learning_rate': 5.446574479554731, 'epoch': 0.45} + 45%|████▌ | 234/520 [15:05<17:49, 3.74s/it] 45%|████▌ | 235/520 [15:09<17:37, 3.71s/it] {'loss': 10.7346, 'grad_norm': 2.6671173594034657e-05, 'learning_rate': 5.419133918053562, 'epoch': 0.45} + 45%|████▌ | 235/520 [15:09<17:37, 3.71s/it] 45%|████▌ | 236/520 [15:12<17:30, 3.70s/it] {'loss': 10.7174, 'grad_norm': 3.536963498873289e-05, 'learning_rate': 5.39165764439729, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:12<17:30, 3.70s/it] 46%|████▌ | 237/520 [15:16<17:30, 3.71s/it] {'loss': 
10.4439, 'grad_norm': 2.490225284754477e-05, 'learning_rate': 5.364146726152813, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:16<17:30, 3.71s/it] 46%|████▌ | 238/520 [15:20<17:26, 3.71s/it] {'loss': 10.6274, 'grad_norm': 4.2704136336494404e-05, 'learning_rate': 5.336602232233116, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:20<17:26, 3.71s/it] 46%|████▌ | 239/520 [15:23<17:24, 3.72s/it] {'loss': 11.1149, 'grad_norm': 2.8136368960604985e-05, 'learning_rate': 5.309025232855737, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:23<17:24, 3.72s/it] 46%|████▌ | 240/520 [15:27<17:19, 3.71s/it] {'loss': 10.6252, 'grad_norm': 3.140234151014477e-05, 'learning_rate': 5.281416799501187, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:27<17:19, 3.71s/it] 46%|████▋ | 241/520 [15:31<17:12, 3.70s/it] {'loss': 10.6864, 'grad_norm': 5.208227526524726e-05, 'learning_rate': 5.253778004871314, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:31<17:12, 3.70s/it] 47%|████▋ | 242/520 [15:34<17:09, 3.70s/it] {'loss': 10.745, 'grad_norm': 3.215713998227321e-05, 'learning_rate': 5.22610992284763, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:34<17:09, 3.70s/it] 47%|████▋ | 243/520 [15:38<17:05, 3.70s/it] {'loss': 10.626, 'grad_norm': 4.245149783378657e-05, 'learning_rate': 5.198413628449582, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:38<17:05, 3.70s/it] 47%|████▋ | 244/520 [15:42<16:59, 3.69s/it] {'loss': 10.578, 'grad_norm': 4.332817033861442e-05, 'learning_rate': 5.170690197792784, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:42<16:59, 3.69s/it] 47%|████▋ | 245/520 [15:46<17:00, 3.71s/it] {'loss': 10.676, 'grad_norm': 2.582647289391325e-05, 'learning_rate': 5.1429407080472105, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:46<17:00, 3.71s/it] 47%|████▋ | 246/520 [15:50<17:21, 3.80s/it] {'loss': 11.1626, 'grad_norm': 1.379379596340063e-05, 'learning_rate': 5.11516623739533, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:50<17:21, 3.80s/it] 48%|████▊ | 247/520 [15:54<17:27, 3.84s/it] {'loss': 10.8961, 'grad_norm': 1.6575734489640833e-05, 'learning_rate': 5.087367864990233, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:54<17:27, 3.84s/it] 48%|████▊ | 248/520 [15:57<17:31, 3.87s/it] {'loss': 10.7116, 'grad_norm': 1.9056004989184032e-05, 'learning_rate': 5.059546670913684, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:57<17:31, 3.87s/it] 48%|████▊ | 249/520 [16:01<17:28, 3.87s/it] {'loss': 10.7478, 'grad_norm': 1.5017878687216072e-05, 'learning_rate': 5.031703736134168, 'epoch': 0.48} + 48%|████▊ | 249/520 [16:01<17:28, 3.87s/it] 48%|████▊ | 250/520 [16:05<17:27, 3.88s/it] {'loss': 11.3483, 'grad_norm': 1.693451898510002e-05, 'learning_rate': 5.0038401424648855, 'epoch': 0.48} + 48%|████▊ | 250/520 [16:05<17:27, 3.88s/it] 48%|████▊ | 251/520 [16:09<17:24, 3.88s/it] {'loss': 10.8772, 'grad_norm': 1.2465766519268658e-05, 'learning_rate': 4.97595697252172, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:09<17:24, 3.88s/it] 48%|████▊ | 252/520 [16:13<17:21, 3.89s/it] {'loss': 10.8841, 'grad_norm': 1.5574872625735506e-05, 'learning_rate': 4.948055309681175, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:13<17:21, 3.89s/it] 49%|████▊ | 253/520 [16:17<17:17, 3.89s/it] {'loss': 11.2924, 'grad_norm': 2.1783725169198614e-05, 'learning_rate': 4.920136238038277, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:17<17:17, 3.89s/it] 49%|████▉ | 254/520 [16:21<17:15, 3.89s/it] {'loss': 10.5266, 'grad_norm': 2.599525121809596e-05, 'learning_rate': 4.8922008423644625, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:21<17:15, 3.89s/it] 49%|████▉ | 255/520 [16:25<17:07, 3.88s/it] {'loss': 11.073, 'grad_norm': 
1.7397984706929837e-05, 'learning_rate': 4.864250208065415, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:25<17:07, 3.88s/it] 49%|████▉ | 256/520 [16:29<17:03, 3.88s/it] {'loss': 10.5745, 'grad_norm': 1.7763189874128396e-05, 'learning_rate': 4.83628542113891, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:29<17:03, 3.88s/it] 49%|████▉ | 257/520 [16:32<16:57, 3.87s/it] {'loss': 10.743, 'grad_norm': 2.2030491725178385e-05, 'learning_rate': 4.808307568132605, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:32<16:57, 3.87s/it] 50%|████▉ | 258/520 [16:36<17:01, 3.90s/it] {'loss': 10.5714, 'grad_norm': 2.6790872132624497e-05, 'learning_rate': 4.780317736101835, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:36<17:01, 3.90s/it] 50%|████▉ | 259/520 [16:40<16:52, 3.88s/it] {'loss': 11.0812, 'grad_norm': 1.5757329346556485e-05, 'learning_rate': 4.752317012567363, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:40<16:52, 3.88s/it] 50%|█████ | 260/520 [16:44<16:50, 3.89s/it] {'loss': 10.8037, 'grad_norm': 2.121055033036972e-05, 'learning_rate': 4.724306485473138, 'epoch': 0.5} + 50%|█████ | 260/520 [16:44<16:50, 3.89s/it] 50%|█████ | 261/520 [16:48<16:44, 3.88s/it] {'loss': 11.1268, 'grad_norm': 1.2609307273591582e-05, 'learning_rate': 4.696287243144012, 'epoch': 0.5} + 50%|█████ | 261/520 [16:48<16:44, 3.88s/it] 50%|█████ | 262/520 [16:52<16:42, 3.89s/it] {'loss': 10.6799, 'grad_norm': 1.7977336321290605e-05, 'learning_rate': 4.6682603742434665, 'epoch': 0.5} + 50%|█████ | 262/520 [16:52<16:42, 3.89s/it] 51%|█████ | 263/520 [16:56<16:35, 3.87s/it] {'loss': 11.1321, 'grad_norm': 1.341385570156729e-05, 'learning_rate': 4.6402269677313, 'epoch': 0.51} + 51%|█████ | 263/520 [16:56<16:35, 3.87s/it] 51%|█████ | 264/520 [17:00<16:34, 3.88s/it] {'loss': 10.8776, 'grad_norm': 1.584182509474845e-05, 'learning_rate': 4.612188112821328, 'epoch': 0.51} + 51%|█████ | 264/520 [17:00<16:34, 3.88s/it] 51%|█████ | 265/520 [17:03<16:26, 3.87s/it] {'loss': 11.1409, 'grad_norm': 2.222476235185639e-05, 'learning_rate': 4.58414489893906, 'epoch': 0.51} + 51%|█████ | 265/520 [17:03<16:26, 3.87s/it] 51%|█████ | 266/520 [17:07<16:25, 3.88s/it] {'loss': 10.1332, 'grad_norm': 2.5704085084098533e-05, 'learning_rate': 4.556098415679368, 'epoch': 0.51} + 51%|█████ | 266/520 [17:07<16:25, 3.88s/it] 51%|█████▏ | 267/520 [17:11<16:19, 3.87s/it] {'loss': 10.3979, 'grad_norm': 2.9178646849089514e-05, 'learning_rate': 4.528049752764151, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:11<16:19, 3.87s/it] 52%|█████▏ | 268/520 [17:15<16:13, 3.86s/it] {'loss': 11.4214, 'grad_norm': 1.3187863883751541e-05, 'learning_rate': 4.5, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:15<16:13, 3.86s/it] 52%|█████▏ | 269/520 [17:19<15:58, 3.82s/it] {'loss': 10.8022, 'grad_norm': 1.50849801070864e-05, 'learning_rate': 4.471950247235849, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:19<15:58, 3.82s/it] 52%|█████▏ | 270/520 [17:23<16:00, 3.84s/it] {'loss': 10.7931, 'grad_norm': 1.8264103846499237e-05, 'learning_rate': 4.443901584320632, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:23<16:00, 3.84s/it] 52%|█████▏ | 271/520 [17:27<16:05, 3.88s/it] {'loss': 11.0579, 'grad_norm': 1.9166908253403733e-05, 'learning_rate': 4.415855101060941, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:27<16:05, 3.88s/it] 52%|█████▏ | 272/520 [17:31<16:03, 3.89s/it] {'loss': 11.0567, 'grad_norm': 1.246727286399051e-05, 'learning_rate': 4.387811887178673, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:31<16:03, 3.89s/it] 52%|█████▎ | 273/520 [17:35<16:07, 3.92s/it] {'loss': 10.868, 'grad_norm': 2.182610290872518e-05, 'learning_rate': 
4.359773032268702, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:35<16:07, 3.92s/it] 53%|█████▎ | 274/520 [17:38<15:46, 3.85s/it] {'loss': 10.4866, 'grad_norm': 2.362969414756151e-05, 'learning_rate': 4.331739625756535, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:38<15:46, 3.85s/it] 53%|█████▎ | 275/520 [17:42<15:32, 3.81s/it] {'loss': 10.9266, 'grad_norm': 2.6970839499607503e-05, 'learning_rate': 4.3037127568559885, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:42<15:32, 3.81s/it] 53%|█████▎ | 276/520 [17:46<15:27, 3.80s/it] {'loss': 11.0122, 'grad_norm': 2.9410700605377693e-05, 'learning_rate': 4.275693514526862, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:46<15:27, 3.80s/it] 53%|█████▎ | 277/520 [17:49<15:17, 3.77s/it] {'loss': 11.1485, 'grad_norm': 1.4430201455571377e-05, 'learning_rate': 4.247682987432636, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:49<15:17, 3.77s/it] 53%|█████▎ | 278/520 [17:53<15:10, 3.76s/it] {'loss': 10.1464, 'grad_norm': 2.2012440497865033e-05, 'learning_rate': 4.219682263898165, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:53<15:10, 3.76s/it] 54%|█████▎ | 279/520 [17:57<15:03, 3.75s/it] {'loss': 11.2374, 'grad_norm': 1.5516323451874936e-05, 'learning_rate': 4.191692431867395, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:57<15:03, 3.75s/it] 54%|█████▍ | 280/520 [18:01<14:54, 3.73s/it] {'loss': 10.7415, 'grad_norm': 1.8120039861432947e-05, 'learning_rate': 4.163714578861091, 'epoch': 0.54} + 54%|█████▍ | 280/520 [18:01<14:54, 3.73s/it] 54%|█████▍ | 281/520 [18:04<14:58, 3.76s/it] {'loss': 10.8883, 'grad_norm': 1.8026501985562726e-05, 'learning_rate': 4.135749791934585, 'epoch': 0.54} + 54%|█████▍ | 281/520 [18:04<14:58, 3.76s/it] 54%|█████▍ | 282/520 [18:08<14:50, 3.74s/it] {'loss': 10.2257, 'grad_norm': 2.3244693088606055e-05, 'learning_rate': 4.1077991576355375, 'epoch': 0.54} + 54%|█████▍ | 282/520 [18:08<14:50, 3.74s/it] 54%|█████▍ | 283/520 [18:12<14:51, 3.76s/it] {'loss': 11.0544, 'grad_norm': 2.600943240420237e-05, 'learning_rate': 4.079863761961723, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:12<14:51, 3.76s/it] 55%|█████▍ | 284/520 [18:16<14:46, 3.76s/it] {'loss': 11.3227, 'grad_norm': 1.9395514649239044e-05, 'learning_rate': 4.051944690318826, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:16<14:46, 3.76s/it] 55%|█████▍ | 285/520 [18:19<14:47, 3.78s/it] {'loss': 10.4938, 'grad_norm': 1.5728491650962698e-05, 'learning_rate': 4.024043027478281, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:19<14:47, 3.78s/it] 55%|█████▌ | 286/520 [18:23<14:37, 3.75s/it] {'loss': 10.8443, 'grad_norm': 1.7561933929447283e-05, 'learning_rate': 3.9961598575351145, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:23<14:37, 3.75s/it] 55%|█████▌ | 287/520 [18:27<14:39, 3.77s/it] {'loss': 10.8793, 'grad_norm': 1.4908303694129376e-05, 'learning_rate': 3.9682962638658323, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:27<14:39, 3.77s/it] 55%|█████▌ | 288/520 [18:31<14:28, 3.74s/it] {'loss': 10.9515, 'grad_norm': 1.4106793003125033e-05, 'learning_rate': 3.940453329086318, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:31<14:28, 3.74s/it] 56%|█████▌ | 289/520 [18:34<14:24, 3.74s/it] {'loss': 10.8367, 'grad_norm': 1.8060920467054065e-05, 'learning_rate': 3.912632135009769, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:34<14:24, 3.74s/it] 56%|█████▌ | 290/520 [18:38<14:17, 3.73s/it] {'loss': 10.1739, 'grad_norm': 1.6694603580519752e-05, 'learning_rate': 3.8848337626046705, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:38<14:17, 3.73s/it] 56%|█████▌ | 291/520 [18:42<14:10, 3.71s/it] {'loss': 10.5974, 'grad_norm': 1.4831266119456313e-05, 
'learning_rate': 3.857059291952791, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:42<14:10, 3.71s/it] 56%|█████▌ | 292/520 [18:46<14:11, 3.74s/it] {'loss': 10.8551, 'grad_norm': 1.3358641739218657e-05, 'learning_rate': 3.8293098022072147, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:46<14:11, 3.74s/it] 56%|█████▋ | 293/520 [18:49<14:03, 3.71s/it] {'loss': 10.7767, 'grad_norm': 1.545807755077123e-05, 'learning_rate': 3.8015863715504175, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:49<14:03, 3.71s/it] 57%|█████▋ | 294/520 [18:53<13:57, 3.71s/it] {'loss': 11.1712, 'grad_norm': 1.7950045101531653e-05, 'learning_rate': 3.7738900771523696, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:53<13:57, 3.71s/it] 57%|█████▋ | 295/520 [18:57<13:54, 3.71s/it] {'loss': 11.0683, 'grad_norm': 1.4406586265676163e-05, 'learning_rate': 3.746221995128687, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:57<13:54, 3.71s/it] 57%|█████▋ | 296/520 [19:00<13:47, 3.69s/it] {'loss': 10.3113, 'grad_norm': 2.6951166054176797e-05, 'learning_rate': 3.7185832004988137, 'epoch': 0.57} + 57%|█████▋ | 296/520 [19:00<13:47, 3.69s/it] 57%|█████▋ | 297/520 [19:04<13:41, 3.69s/it] {'loss': 10.7691, 'grad_norm': 2.4578050192610648e-05, 'learning_rate': 3.690974767144263, 'epoch': 0.57} + 57%|█████▋ | 297/520 [19:04<13:41, 3.69s/it] 57%|█████▋ | 298/520 [19:08<13:42, 3.71s/it] {'loss': 10.1699, 'grad_norm': 2.5412749253999e-05, 'learning_rate': 3.6633977677668845, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:08<13:42, 3.71s/it] 57%|█████▊ | 299/520 [19:11<13:35, 3.69s/it] {'loss': 10.977, 'grad_norm': 1.41956646217412e-05, 'learning_rate': 3.6358532738471876, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:11<13:35, 3.69s/it] 58%|█████▊ | 300/520 [19:15<13:40, 3.73s/it] {'loss': 10.7763, 'grad_norm': 1.3286462956767284e-05, 'learning_rate': 3.6083423556027117, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:15<13:40, 3.73s/it] 58%|█████▊ | 301/520 [19:19<13:34, 3.72s/it] {'loss': 10.3455, 'grad_norm': 1.758291396182965e-05, 'learning_rate': 3.5808660819464393, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:19<13:34, 3.72s/it] 58%|█████▊ | 302/520 [19:23<13:35, 3.74s/it] {'loss': 11.0106, 'grad_norm': 1.6361664732203792e-05, 'learning_rate': 3.55342552044527, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:23<13:35, 3.74s/it] 58%|█████▊ | 303/520 [19:26<13:33, 3.75s/it] {'loss': 10.7101, 'grad_norm': 2.1088436622496766e-05, 'learning_rate': 3.526021737278537, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:26<13:33, 3.75s/it] 58%|█████▊ | 304/520 [19:30<13:31, 3.75s/it] {'loss': 11.5772, 'grad_norm': 2.0252276432941023e-05, 'learning_rate': 3.4986557971965855, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:30<13:31, 3.75s/it] 59%|█████▊ | 305/520 [19:34<13:22, 3.73s/it] {'loss': 10.9924, 'grad_norm': 1.9938635062709086e-05, 'learning_rate': 3.4713287634793977, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:34<13:22, 3.73s/it] 59%|█████▉ | 306/520 [19:38<13:22, 3.75s/it] {'loss': 10.7508, 'grad_norm': 1.6004243640799963e-05, 'learning_rate': 3.4440416978952824, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:38<13:22, 3.75s/it] 59%|█████▉ | 307/520 [19:41<13:16, 3.74s/it] {'loss': 10.5232, 'grad_norm': 2.8382708077932825e-05, 'learning_rate': 3.4167956606596226, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:41<13:16, 3.74s/it] 59%|█████▉ | 308/520 [19:45<13:18, 3.77s/it] {'loss': 10.3517, 'grad_norm': 2.3147315253771927e-05, 'learning_rate': 3.3895917103936783, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:45<13:18, 3.77s/it] 59%|█████▉ | 309/520 [19:49<13:31, 3.84s/it] {'loss': 10.1135, 'grad_norm': 
3.384500883776929e-05, 'learning_rate': 3.362430904083461, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:49<13:31, 3.84s/it] 60%|█████▉ | 310/520 [19:53<13:17, 3.80s/it] {'loss': 10.5623, 'grad_norm': 1.8260159631596744e-05, 'learning_rate': 3.3353142970386562, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:53<13:17, 3.80s/it] 60%|█████▉ | 311/520 [19:57<13:13, 3.80s/it] {'loss': 10.7148, 'grad_norm': 1.1821841061815316e-05, 'learning_rate': 3.3082429428516273, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:57<13:13, 3.80s/it] 60%|██████ | 312/520 [20:01<13:28, 3.89s/it] {'loss': 10.778, 'grad_norm': 1.2073494027268507e-05, 'learning_rate': 3.2812178933564775, 'epoch': 0.6} + 60%|██████ | 312/520 [20:01<13:28, 3.89s/it] 60%|██████ | 313/520 [20:05<13:15, 3.84s/it] {'loss': 9.868, 'grad_norm': 1.707034927957328e-05, 'learning_rate': 3.254240198588178, 'epoch': 0.6} + 60%|██████ | 313/520 [20:05<13:15, 3.84s/it] 60%|██████ | 314/520 [20:09<13:29, 3.93s/it] {'loss': 10.6507, 'grad_norm': 2.180917324083796e-05, 'learning_rate': 3.2273109067417765, 'epoch': 0.6} + 60%|██████ | 314/520 [20:09<13:29, 3.93s/it] 61%|██████ | 315/520 [20:12<13:09, 3.85s/it] {'loss': 11.4876, 'grad_norm': 1.3150464339907226e-05, 'learning_rate': 3.2004310641316662, 'epoch': 0.61} + 61%|██████ | 315/520 [20:12<13:09, 3.85s/it] 61%|██████ | 316/520 [20:17<13:25, 3.95s/it] {'loss': 11.0489, 'grad_norm': 1.575668954380509e-05, 'learning_rate': 3.173601715150931, 'epoch': 0.61} + 61%|██████ | 316/520 [20:17<13:25, 3.95s/it] 61%|██████ | 317/520 [20:20<13:05, 3.87s/it] {'loss': 10.0218, 'grad_norm': 2.6153171479670177e-05, 'learning_rate': 3.1468239022307714, 'epoch': 0.61} + 61%|██████ | 317/520 [20:20<13:05, 3.87s/it] 61%|██████ | 318/520 [20:24<12:51, 3.82s/it] {'loss': 11.3647, 'grad_norm': 1.848299157756956e-05, 'learning_rate': 3.120098665799996, 'epoch': 0.61} + 61%|██████ | 318/520 [20:24<12:51, 3.82s/it] 61%|██████▏ | 319/520 [20:28<12:45, 3.81s/it] {'loss': 10.0415, 'grad_norm': 2.4204756385996728e-05, 'learning_rate': 3.0934270442446006, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:28<12:45, 3.81s/it] 62%|██████▏ | 320/520 [20:31<12:33, 3.77s/it] {'loss': 11.0419, 'grad_norm': 1.6112996229140524e-05, 'learning_rate': 3.0668100738674204, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:31<12:33, 3.77s/it] 62%|██████▏ | 321/520 [20:35<12:35, 3.80s/it] {'loss': 10.4656, 'grad_norm': 2.528317268862339e-05, 'learning_rate': 3.040248788847869, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:35<12:35, 3.80s/it] 62%|██████▏ | 322/520 [20:39<12:24, 3.76s/it] {'loss': 11.2329, 'grad_norm': 1.6888100465224974e-05, 'learning_rate': 3.0137442212017493, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:39<12:24, 3.76s/it] 62%|██████▏ | 323/520 [20:43<12:16, 3.74s/it] {'loss': 11.3377, 'grad_norm': 1.6734553814469893e-05, 'learning_rate': 2.9872974007411623, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:43<12:16, 3.74s/it] 62%|██████▏ | 324/520 [20:46<12:14, 3.75s/it] {'loss': 10.4572, 'grad_norm': 2.777577308828579e-05, 'learning_rate': 2.960909355034491, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:46<12:14, 3.75s/it] 62%|██████▎ | 325/520 [20:50<12:08, 3.74s/it] {'loss': 10.909, 'grad_norm': 2.3599831661430666e-05, 'learning_rate': 2.9345811093664773, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:50<12:08, 3.74s/it] 63%|██████▎ | 326/520 [20:54<12:03, 3.73s/it] {'loss': 11.0095, 'grad_norm': 1.401781833488455e-05, 'learning_rate': 2.9083136866983836, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:54<12:03, 3.73s/it] 63%|██████▎ | 327/520 [20:57<11:58, 3.72s/it] {'loss': 
11.4436, 'grad_norm': 1.5243188759632763e-05, 'learning_rate': 2.882108107628246, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:57<11:58, 3.72s/it] 63%|██████▎ | 328/520 [21:01<11:51, 3.71s/it] {'loss': 10.7616, 'grad_norm': 1.3376887166902928e-05, 'learning_rate': 2.8559653903512223, 'epoch': 0.63} + 63%|██████▎ | 328/520 [21:01<11:51, 3.71s/it] 63%|██████▎ | 329/520 [21:05<11:48, 3.71s/it] {'loss': 10.0722, 'grad_norm': 2.1309651592377794e-05, 'learning_rate': 2.8298865506200293, 'epoch': 0.63} + 63%|██████▎ | 329/520 [21:05<11:48, 3.71s/it] 63%|██████▎ | 330/520 [21:09<11:45, 3.71s/it] {'loss': 10.6324, 'grad_norm': 1.5982462956776444e-05, 'learning_rate': 2.8038726017054767, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:09<11:45, 3.71s/it] 64%|██████▎ | 331/520 [21:12<11:40, 3.71s/it] {'loss': 10.8991, 'grad_norm': 1.712762620357974e-05, 'learning_rate': 2.7779245543570963, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:12<11:40, 3.71s/it] 64%|██████▍ | 332/520 [21:16<11:38, 3.71s/it] {'loss': 10.9346, 'grad_norm': 1.6720640054827416e-05, 'learning_rate': 2.752043416763874, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:16<11:38, 3.71s/it] 64%|██████▍ | 333/520 [21:20<11:33, 3.71s/it] {'loss': 10.8249, 'grad_norm': 2.510837885496964e-05, 'learning_rate': 2.7262301945150735, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:20<11:33, 3.71s/it] 64%|██████▍ | 334/520 [21:23<11:29, 3.70s/it] {'loss': 10.5749, 'grad_norm': 1.6320534430894724e-05, 'learning_rate': 2.7004858905611666, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:23<11:29, 3.70s/it] 64%|██████▍ | 335/520 [21:27<11:26, 3.71s/it] {'loss': 10.424, 'grad_norm': 2.0389498652965654e-05, 'learning_rate': 2.674811505174863, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:27<11:26, 3.71s/it] 65%|██████▍ | 336/520 [21:31<11:22, 3.71s/it] {'loss': 10.819, 'grad_norm': 1.564322649828929e-05, 'learning_rate': 2.649208035912249, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:31<11:22, 3.71s/it] 65%|██████▍ | 337/520 [21:35<11:21, 3.72s/it] {'loss': 11.2895, 'grad_norm': 1.5152822337000039e-05, 'learning_rate': 2.6236764775740253, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:35<11:21, 3.72s/it] 65%|██████▌ | 338/520 [21:38<11:16, 3.72s/it] {'loss': 10.9212, 'grad_norm': 1.527959208185846e-05, 'learning_rate': 2.5982178221668533, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:38<11:16, 3.72s/it] 65%|██████▌ | 339/520 [21:42<11:11, 3.71s/it] {'loss': 11.0622, 'grad_norm': 2.1668305938916355e-05, 'learning_rate': 2.572833058864817, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:42<11:11, 3.71s/it] 65%|██████▌ | 340/520 [21:46<11:10, 3.72s/it] {'loss': 10.6982, 'grad_norm': 3.521084821552898e-05, 'learning_rate': 2.5475231739709887, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:46<11:10, 3.72s/it] 66%|██████▌ | 341/520 [21:49<11:02, 3.70s/it] {'loss': 10.6121, 'grad_norm': 4.765391955640858e-05, 'learning_rate': 2.522289150879097, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:49<11:02, 3.70s/it] 66%|██████▌ | 342/520 [21:53<10:58, 3.70s/it] {'loss': 11.339, 'grad_norm': 1.860262268533122e-05, 'learning_rate': 2.4971319700353343, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:53<10:58, 3.70s/it] 66%|██████▌ | 343/520 [21:57<10:54, 3.70s/it] {'loss': 10.6658, 'grad_norm': 1.5985996783376688e-05, 'learning_rate': 2.4720526089002455, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:57<10:54, 3.70s/it] 66%|██████▌ | 344/520 [22:00<10:49, 3.69s/it] {'loss': 10.7093, 'grad_norm': 1.790384215237004e-05, 'learning_rate': 2.4470520419107666, 'epoch': 0.66} + 66%|██████▌ | 344/520 [22:00<10:49, 3.69s/it] 
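The per-step metrics above are printed as Python dict literals, so the full training curve can be recovered from the raw log with the stdlib alone. A minimal sketch, assuming the flattened log text has been saved to a file; the path train.log is a placeholder, not this run's actual filename:

import ast
import re

# Each step logs a flat dict literal with no nested braces, so a non-greedy
# match up to the first closing brace captures exactly one step's metrics.
STEP_RE = re.compile(r"\{'loss':.*?\}")

def parse_steps(log_text: str) -> list[dict]:
    """Return one {'loss', 'grad_norm', 'learning_rate', 'epoch'} dict per step."""
    return [ast.literal_eval(m.group(0)) for m in STEP_RE.finditer(log_text)]

with open("train.log") as f:  # placeholder path
    steps = parse_steps(f.read())

losses = [s["loss"] for s in steps]
print(len(steps), min(losses), max(losses))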
{'loss': 11.0151, 'grad_norm': 1.5796211560562965e-05, 'learning_rate': 2.4221312404423485, 'epoch': 0.66} (step 345/520)
[steps 346-347 elided: loss 10.9741, 9.9017; learning_rate 2.397291172771221 -> 2.372532804036779]
Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors
[steps 348-360 elided: loss spikes to 11.7767 at step 348, then ranges 10.047-11.4563; learning_rate decays from 2.3478570962040695 through 2.25 (step 352) to 2.058541812604083]
{'loss': 10.5457, 'grad_norm': 1.4176080751138321e-05, 'learning_rate': 2.0350267235195796, 'epoch': 0.69} (step 361/520)
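The "Token indices sequence length" warning above comes from the tokenizer, not the model: one sample encoded to 2778 ids, past the 2048-id budget, and was returned untruncated. A minimal sketch of the behavior and the usual encode-time guard; the Qwen/Qwen2.5-0.5B tokenizer and the sample text are assumptions for illustration, not taken from this log:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")  # assumed tokenizer
tok.model_max_length = 2048  # the budget implied by the warning

long_text = "hello " * 3000  # fabricated stand-in for an over-long conversation

# Encoding without truncation logs the warning but still returns every id ...
ids = tok(long_text)["input_ids"]
print(len(ids))  # > 2048

# ... so the ids must be clamped before they reach the model, e.g. at encode time:
ids = tok(long_text, truncation=True, max_length=tok.model_max_length)["input_ids"]
assert len(ids) <= tok.model_max_length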
{'loss': 10.7316, 'grad_norm': 1.0131383067768088e-05, 'learning_rate': 2.0116074088442724, 'epoch': 0.7} (step 362/520)
[steps 363-413 elided: loss ranges from 10.1559 (step 401) to 11.6012 (step 406) with no sustained trend; grad_norm settles around 1e-05-3e-05; learning_rate drops below 1.0 at step 411 (0.9993154128087836); throughput steady at ~3.7 s/it]
{'loss': 10.6913, 'grad_norm': 1.1681135325325944e-05, 'learning_rate': 0.947054932648941, 'epoch': 0.8} (step 414/520)
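The logged learning_rate values trace a textbook warmup-plus-cosine decay: 6.75, 4.5 and 2.25 (steps 184, 268, 352) are exactly 3/4, 1/2 and 1/4 of a 9.0 peak, spaced as a cosine falling to zero over steps 16-520 would place them. A minimal sketch that reproduces the curve; the peak (9.0) and the 16 warmup steps are fitted to the logged points, not values stated anywhere in this log:

import math

PEAK_LR, WARMUP_STEPS, TOTAL_STEPS = 9.0, 16, 520  # fitted assumptions

def lr_at(step: int) -> float:
    """Linear warmup, then cosine decay to zero (transformers' 'cosine' shape)."""
    if step < WARMUP_STEPS:
        return PEAK_LR * step / WARMUP_STEPS
    progress = (step - WARMUP_STEPS) / (TOTAL_STEPS - WARMUP_STEPS)
    return PEAK_LR * 0.5 * (1.0 + math.cos(math.pi * progress))

for s in (70, 184, 268, 352, 414):
    print(s, round(lr_at(s), 6))
# 8.747475, 6.75, 4.5, 2.25, 0.947055 -- matching the values logged above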
0.947054932648941, 'epoch': 0.8} + 80%|███████▉ | 414/520 [26:20<06:32, 3.71s/it] 80%|███████▉ | 415/520 [26:24<06:31, 3.73s/it] {'loss': 10.4015, 'grad_norm': 1.3520699819453492e-05, 'learning_rate': 0.9299099686894423, 'epoch': 0.8} + 80%|███████▉ | 415/520 [26:24<06:31, 3.73s/it] 80%|████████ | 416/520 [26:28<06:27, 3.72s/it] {'loss': 11.5251, 'grad_norm': 2.3706283626113548e-05, 'learning_rate': 0.9129037174968502, 'epoch': 0.8} + 80%|████████ | 416/520 [26:28<06:27, 3.72s/it] 80%|████████ | 417/520 [26:32<06:22, 3.71s/it] {'loss': 10.3531, 'grad_norm': 1.7807611648202756e-05, 'learning_rate': 0.8960368398343747, 'epoch': 0.8} + 80%|████████ | 417/520 [26:32<06:22, 3.71s/it] 80%|████████ | 418/520 [26:35<06:19, 3.72s/it] {'loss': 10.4747, 'grad_norm': 1.3730958826764245e-05, 'learning_rate': 0.8793099910499924, 'epoch': 0.8} + 80%|████████ | 418/520 [26:35<06:19, 3.72s/it] 81%|████████ | 419/520 [26:39<06:14, 3.71s/it] {'loss': 11.1706, 'grad_norm': 1.1239382267702683e-05, 'learning_rate': 0.8627238210509764, 'epoch': 0.81} + 81%|████████ | 419/520 [26:39<06:14, 3.71s/it] 81%|████████ | 420/520 [26:43<06:11, 3.71s/it] {'loss': 10.894, 'grad_norm': 1.3764702818438566e-05, 'learning_rate': 0.8462789742786457, 'epoch': 0.81} + 81%|████████ | 420/520 [26:43<06:11, 3.71s/it] 81%|████████ | 421/520 [26:46<06:07, 3.71s/it] {'loss': 11.4835, 'grad_norm': 1.8216472662201374e-05, 'learning_rate': 0.8299760896833293, 'epoch': 0.81} + 81%|████████ | 421/520 [26:46<06:07, 3.71s/it] 81%|████████ | 422/520 [26:50<06:05, 3.72s/it] {'loss': 10.6744, 'grad_norm': 1.7498435197651124e-05, 'learning_rate': 0.8138158006995364, 'epoch': 0.81} + 81%|████████ | 422/520 [26:50<06:05, 3.72s/it] 81%|████████▏ | 423/520 [26:54<06:01, 3.73s/it] {'loss': 11.4339, 'grad_norm': 1.5760384913397178e-05, 'learning_rate': 0.7977987352213499, 'epoch': 0.81} + 81%|████████▏ | 423/520 [26:54<06:01, 3.73s/it] 82%|████████▏ | 424/520 [26:58<05:58, 3.74s/it] {'loss': 10.8092, 'grad_norm': 1.7583872855449242e-05, 'learning_rate': 0.7819255155780239, 'epoch': 0.82} + 82%|████████▏ | 424/520 [26:58<05:58, 3.74s/it] 82%|████████▏ | 425/520 [27:01<05:53, 3.72s/it] {'loss': 10.4139, 'grad_norm': 2.3390509735528284e-05, 'learning_rate': 0.7661967585098063, 'epoch': 0.82} + 82%|████████▏ | 425/520 [27:01<05:53, 3.72s/it] 82%|████████▏ | 426/520 [27:05<05:49, 3.72s/it] {'loss': 11.1765, 'grad_norm': 1.614526793537015e-05, 'learning_rate': 0.7506130751439803, 'epoch': 0.82} + 82%|████████▏ | 426/520 [27:05<05:49, 3.72s/it] 82%|████████▏ | 427/520 [27:09<05:45, 3.71s/it] {'loss': 10.231, 'grad_norm': 2.373045446054645e-05, 'learning_rate': 0.7351750709711111, 'epoch': 0.82} + 82%|████████▏ | 427/520 [27:09<05:45, 3.71s/it] 82%|████████▏ | 428/520 [27:12<05:41, 3.71s/it] {'loss': 10.6176, 'grad_norm': 2.3057875560965885e-05, 'learning_rate': 0.7198833458215287, 'epoch': 0.82} + 82%|████████▏ | 428/520 [27:12<05:41, 3.71s/it] 82%|████████▎ | 429/520 [27:16<05:37, 3.71s/it] {'loss': 10.8717, 'grad_norm': 2.230722382408269e-05, 'learning_rate': 0.7047384938420153, 'epoch': 0.82} + 82%|████████▎ | 429/520 [27:16<05:37, 3.71s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). 
Running this sequence through the model will result in indexing errors + 83%|████████▎ | 430/520 [27:20<05:33, 3.71s/it] {'loss': 9.8745, 'grad_norm': 3.094521173534905e-05, 'learning_rate': 0.6897411034727214, 'epoch': 0.83} + 83%|████████▎ | 430/520 [27:20<05:33, 3.71s/it] 83%|████████▎ | 431/520 [27:24<05:30, 3.71s/it] {'loss': 11.1031, 'grad_norm': 2.037304804978328e-05, 'learning_rate': 0.674891757424309, 'epoch': 0.83} + 83%|████████▎ | 431/520 [27:24<05:30, 3.71s/it] 83%|████████▎ | 432/520 [27:27<05:28, 3.73s/it] {'loss': 10.6714, 'grad_norm': 2.6007675218929906e-05, 'learning_rate': 0.6601910326552998, 'epoch': 0.83} + 83%|████████▎ | 432/520 [27:27<05:28, 3.73s/it] 83%|████████▎ | 433/520 [27:31<05:22, 3.71s/it] {'loss': 10.6193, 'grad_norm': 2.4710887867078882e-05, 'learning_rate': 0.645639500349669, 'epoch': 0.83} + 83%|████████▎ | 433/520 [27:31<05:22, 3.71s/it] 83%|████████▎ | 434/520 [27:35<05:19, 3.71s/it] {'loss': 10.6216, 'grad_norm': 3.498739514511438e-05, 'learning_rate': 0.6312377258946437, 'epoch': 0.83} + 83%|████████▎ | 434/520 [27:35<05:19, 3.71s/it] 84%|████████▎ | 435/520 [27:38<05:15, 3.71s/it] {'loss': 10.6568, 'grad_norm': 2.7523949001841107e-05, 'learning_rate': 0.6169862688587413, 'epoch': 0.84} + 84%|████████▎ | 435/520 [27:38<05:15, 3.71s/it] 84%|████████▍ | 436/520 [27:42<05:10, 3.70s/it] {'loss': 10.744, 'grad_norm': 3.86324996021773e-05, 'learning_rate': 0.6028856829700258, 'epoch': 0.84} + 84%|████████▍ | 436/520 [27:42<05:10, 3.70s/it] 84%|████████▍ | 437/520 [27:46<05:08, 3.71s/it] {'loss': 10.7826, 'grad_norm': 3.0326515448676333e-05, 'learning_rate': 0.5889365160945912, 'epoch': 0.84} + 84%|████████▍ | 437/520 [27:46<05:08, 3.71s/it] 84%|████████▍ | 438/520 [27:50<05:04, 3.72s/it] {'loss': 10.3413, 'grad_norm': 5.316706325711297e-05, 'learning_rate': 0.575139310215276, 'epoch': 0.84} + 84%|████████▍ | 438/520 [27:50<05:04, 3.72s/it] 84%|████████▍ | 439/520 [27:53<05:01, 3.72s/it] {'loss': 10.4263, 'grad_norm': 4.523729888049435e-05, 'learning_rate': 0.5614946014106084, 'epoch': 0.84} + 84%|████████▍ | 439/520 [27:53<05:01, 3.72s/it] 85%|████████▍ | 440/520 [27:57<04:56, 3.71s/it] {'loss': 10.5662, 'grad_norm': 4.985731924076335e-05, 'learning_rate': 0.5480029198339711, 'epoch': 0.85} + 85%|████████▍ | 440/520 [27:57<04:56, 3.71s/it] 85%|████████▍ | 441/520 [28:01<04:53, 3.71s/it] {'loss': 10.9454, 'grad_norm': 4.010924661421172e-05, 'learning_rate': 0.5346647896930092, 'epoch': 0.85} + 85%|████████▍ | 441/520 [28:01<04:53, 3.71s/it] 85%|████████▌ | 442/520 [28:04<04:50, 3.72s/it] {'loss': 11.1351, 'grad_norm': 4.386910290578911e-05, 'learning_rate': 0.5214807292292565, 'epoch': 0.85} + 85%|████████▌ | 442/520 [28:04<04:50, 3.72s/it] 85%|████████▌ | 443/520 [28:08<04:45, 3.70s/it] {'loss': 10.342, 'grad_norm': 5.059625399940287e-05, 'learning_rate': 0.5084512506980023, 'epoch': 0.85} + 85%|████████▌ | 443/520 [28:08<04:45, 3.70s/it] 85%|████████▌ | 444/520 [28:12<04:40, 3.69s/it] {'loss': 10.4575, 'grad_norm': 4.5699711775315876e-05, 'learning_rate': 0.49557686034839155, 'epoch': 0.85} + 85%|████████▌ | 444/520 [28:12<04:40, 3.69s/it] 86%|████████▌ | 445/520 [28:16<04:38, 3.72s/it] {'loss': 10.2522, 'grad_norm': 5.867130797940161e-05, 'learning_rate': 0.48285805840374907, 'epoch': 0.86} + 86%|████████▌ | 445/520 [28:16<04:38, 3.72s/it] 86%|████████▌ | 446/520 [28:19<04:35, 3.72s/it] {'loss': 10.6311, 'grad_norm': 2.902617575264474e-05, 'learning_rate': 0.4702953390421458, 'epoch': 0.86} + 86%|████████▌ | 446/520 [28:19<04:35, 3.72s/it] 86%|████████▌ 
| 447/520 [28:23<04:32, 3.73s/it] {'loss': 11.2669, 'grad_norm': 2.1636077874816266e-05, 'learning_rate': 0.4578891903772018, 'epoch': 0.86} + 86%|████████▌ | 447/520 [28:23<04:32, 3.73s/it] 86%|████████▌ | 448/520 [28:27<04:27, 3.72s/it] {'loss': 10.5621, 'grad_norm': 2.4207142060037067e-05, 'learning_rate': 0.44564009443911434, 'epoch': 0.86} + 86%|████████▌ | 448/520 [28:27<04:27, 3.72s/it] 86%|████████▋ | 449/520 [28:30<04:24, 3.73s/it] {'loss': 11.2483, 'grad_norm': 1.3506616518484262e-05, 'learning_rate': 0.43354852715593584, 'epoch': 0.86} + 86%|████████▋ | 449/520 [28:30<04:24, 3.73s/it] 87%|████████▋ | 450/520 [28:34<04:21, 3.73s/it] {'loss': 10.767, 'grad_norm': 1.5481515128851602e-05, 'learning_rate': 0.4216149583350753, 'epoch': 0.87} + 87%|████████▋ | 450/520 [28:34<04:21, 3.73s/it] 87%|████████▋ | 451/520 [28:38<04:18, 3.75s/it] {'loss': 11.0893, 'grad_norm': 1.4000321104655879e-05, 'learning_rate': 0.40983985164505077, 'epoch': 0.87} + 87%|████████▋ | 451/520 [28:38<04:18, 3.75s/it] 87%|████████▋ | 452/520 [28:42<04:13, 3.72s/it] {'loss': 10.6533, 'grad_norm': 1.3704561472083333e-05, 'learning_rate': 0.3982236645974709, 'epoch': 0.87} + 87%|████████▋ | 452/520 [28:42<04:13, 3.72s/it] 87%|████████▋ | 453/520 [28:45<04:09, 3.73s/it] {'loss': 11.1886, 'grad_norm': 1.1306296214422086e-05, 'learning_rate': 0.38676684852925647, 'epoch': 0.87} + 87%|████████▋ | 453/520 [28:45<04:09, 3.73s/it] 87%|████████▋ | 454/520 [28:49<04:05, 3.72s/it] {'loss': 10.7278, 'grad_norm': 1.4145177159276502e-05, 'learning_rate': 0.3754698485851071, 'epoch': 0.87} + 87%|████████▋ | 454/520 [28:49<04:05, 3.72s/it] 88%|████████▊ | 455/520 [28:53<04:00, 3.71s/it] {'loss': 10.4577, 'grad_norm': 1.3652177382917529e-05, 'learning_rate': 0.36433310370020705, 'epoch': 0.88} + 88%|████████▊ | 455/520 [28:53<04:00, 3.71s/it] 88%|████████▊ | 456/520 [28:56<03:56, 3.70s/it] {'loss': 10.3819, 'grad_norm': 1.3914138211537592e-05, 'learning_rate': 0.3533570465831652, 'epoch': 0.88} + 88%|████████▊ | 456/520 [28:56<03:56, 3.70s/it] 88%|████████▊ | 457/520 [29:00<03:52, 3.69s/it] {'loss': 11.5623, 'grad_norm': 1.1115357819110277e-05, 'learning_rate': 0.3425421036992097, 'epoch': 0.88} + 88%|████████▊ | 457/520 [29:00<03:52, 3.69s/it] 88%|████████▊ | 458/520 [29:04<03:48, 3.69s/it] {'loss': 10.9537, 'grad_norm': 1.1030114363803618e-05, 'learning_rate': 0.3318886952536111, 'epoch': 0.88} + 88%|████████▊ | 458/520 [29:04<03:48, 3.69s/it] 88%|████████▊ | 459/520 [29:07<03:45, 3.70s/it] {'loss': 10.6688, 'grad_norm': 1.2092848072936806e-05, 'learning_rate': 0.321397235175359, 'epoch': 0.88} + 88%|████████▊ | 459/520 [29:07<03:45, 3.70s/it] 88%|████████▊ | 460/520 [29:11<03:41, 3.70s/it] {'loss': 10.8445, 'grad_norm': 1.3639518283069346e-05, 'learning_rate': 0.3110681311010814, 'epoch': 0.88} + 88%|████████▊ | 460/520 [29:11<03:41, 3.70s/it] 89%|████████▊ | 461/520 [29:15<03:40, 3.74s/it] {'loss': 10.8023, 'grad_norm': 1.3763742365934527e-05, 'learning_rate': 0.30090178435920073, 'epoch': 0.89} + 89%|████████▊ | 461/520 [29:15<03:40, 3.74s/it] 89%|████████▉ | 462/520 [29:19<03:39, 3.78s/it] {'loss': 10.9697, 'grad_norm': 1.3035384333365307e-05, 'learning_rate': 0.29089858995434703, 'epoch': 0.89} + 89%|████████▉ | 462/520 [29:19<03:39, 3.78s/it] 89%|████████▉ | 463/520 [29:23<03:37, 3.81s/it] {'loss': 11.2779, 'grad_norm': 1.7875574365698283e-05, 'learning_rate': 0.2810589365520041, 'epoch': 0.89} + 89%|████████▉ | 463/520 [29:23<03:37, 3.81s/it] 89%|████████▉ | 464/520 [29:27<03:34, 3.84s/it] {'loss': 11.0156, 'grad_norm': 
1.5652931382058966e-05, 'learning_rate': 0.2713832064634126, 'epoch': 0.89} + 89%|████████▉ | 464/520 [29:27<03:34, 3.84s/it] 89%|████████▉ | 465/520 [29:31<03:31, 3.85s/it] {'loss': 10.798, 'grad_norm': 1.7361462582522555e-05, 'learning_rate': 0.2618717756307144, 'epoch': 0.89} + 89%|████████▉ | 465/520 [29:31<03:31, 3.85s/it] 90%|████████▉ | 466/520 [29:35<03:29, 3.89s/it] {'loss': 10.2996, 'grad_norm': 2.156149898958026e-05, 'learning_rate': 0.2525250136123459, 'epoch': 0.9} + 90%|████████▉ | 466/520 [29:35<03:29, 3.89s/it] 90%|████████▉ | 467/520 [29:38<03:26, 3.90s/it] {'loss': 11.0662, 'grad_norm': 2.3478744429929336e-05, 'learning_rate': 0.2433432835686779, 'epoch': 0.9} + 90%|████████▉ | 467/520 [29:38<03:26, 3.90s/it] 90%|█████████ | 468/520 [29:42<03:22, 3.90s/it] {'loss': 11.367, 'grad_norm': 3.311520241565561e-05, 'learning_rate': 0.23432694224790734, 'epoch': 0.9} + 90%|█████████ | 468/520 [29:42<03:22, 3.90s/it] 90%|█████████ | 469/520 [29:46<03:19, 3.92s/it] {'loss': 10.8194, 'grad_norm': 2.7297011340904883e-05, 'learning_rate': 0.22547633997219302, 'epoch': 0.9} + 90%|█████████ | 469/520 [29:46<03:19, 3.92s/it] 90%|█████████ | 470/520 [29:50<03:12, 3.85s/it] {'loss': 10.638, 'grad_norm': 1.53746251052945e-05, 'learning_rate': 0.2167918206240494, 'epoch': 0.9} + 90%|█████████ | 470/520 [29:50<03:12, 3.85s/it] 91%|█████████ | 471/520 [29:54<03:06, 3.80s/it] {'loss': 11.6127, 'grad_norm': 1.3359519554869207e-05, 'learning_rate': 0.2082737216329793, 'epoch': 0.91} + 91%|█████████ | 471/520 [29:54<03:06, 3.80s/it] 91%|█████████ | 472/520 [29:57<03:01, 3.78s/it] {'loss': 10.9418, 'grad_norm': 1.3168514593429749e-05, 'learning_rate': 0.19992237396236645, 'epoch': 0.91} + 91%|█████████ | 472/520 [29:57<03:01, 3.78s/it] 91%|█████████ | 473/520 [30:01<02:57, 3.77s/it] {'loss': 10.9452, 'grad_norm': 1.1824983700326391e-05, 'learning_rate': 0.19173810209661868, 'epoch': 0.91} + 91%|█████████ | 473/520 [30:01<02:57, 3.77s/it] 91%|█████████ | 474/520 [30:05<02:53, 3.77s/it] {'loss': 11.3379, 'grad_norm': 1.0225993352234627e-05, 'learning_rate': 0.18372122402855506, 'epoch': 0.91} + 91%|█████████ | 474/520 [30:05<02:53, 3.77s/it] 91%|█████████▏| 475/520 [30:09<02:49, 3.76s/it] {'loss': 10.7607, 'grad_norm': 1.1274872247187104e-05, 'learning_rate': 0.1758720512470523, 'epoch': 0.91} + 91%|█████████▏| 475/520 [30:09<02:49, 3.76s/it] 92%|█████████▏| 476/520 [30:12<02:43, 3.72s/it] {'loss': 10.8812, 'grad_norm': 1.1147501333613153e-05, 'learning_rate': 0.16819088872494586, 'epoch': 0.92} + 92%|█████████▏| 476/520 [30:12<02:43, 3.72s/it] 92%|█████████▏| 477/520 [30:16<02:39, 3.72s/it] {'loss': 10.7214, 'grad_norm': 1.1639935286748365e-05, 'learning_rate': 0.16067803490717553, 'epoch': 0.92} + 92%|█████████▏| 477/520 [30:16<02:39, 3.72s/it] 92%|█████████▏| 478/520 [30:20<02:36, 3.74s/it] {'loss': 10.6193, 'grad_norm': 1.2329002078529245e-05, 'learning_rate': 0.1533337816991931, 'epoch': 0.92} + 92%|█████████▏| 478/520 [30:20<02:36, 3.74s/it] 92%|█████████▏| 479/520 [30:24<02:35, 3.78s/it] {'loss': 11.2323, 'grad_norm': 9.742248115445218e-06, 'learning_rate': 0.1461584144556175, 'epoch': 0.92} + 92%|█████████▏| 479/520 [30:24<02:35, 3.78s/it] 92%|█████████▏| 480/520 [30:28<02:32, 3.80s/it] {'loss': 11.4167, 'grad_norm': 1.059994762431259e-05, 'learning_rate': 0.13915221196914968, 'epoch': 0.92} + 92%|█████████▏| 480/520 [30:28<02:32, 3.80s/it] 92%|█████████▎| 481/520 [30:31<02:29, 3.83s/it] {'loss': 10.9269, 'grad_norm': 1.0727006777098838e-05, 'learning_rate': 0.1323154464597407, 'epoch': 0.93} 
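The "Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048)" warning a few steps back comes from the tokenizer, not the model: one conversation tokenized to more tokens than --model_max_length 2048, and it has to be truncated before the forward pass or the position ids would index out of range. A minimal sketch of the usual guard, assuming a plain Hugging Face tokenizer — the checkpoint name, use_fast=False, and the 2048 limit are taken from the launch flags above; the helper itself is illustrative:

    from transformers import AutoTokenizer

    # Same checkpoint and slow-tokenizer setting as the launch flags above.
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B", use_fast=False)

    def encode_clipped(text: str, max_length: int = 2048):
        # truncation=True clips to max_length, which both avoids the
        # "Token indices sequence length ..." warning and prevents
        # out-of-range position ids in the forward pass.
        return tokenizer(text, truncation=True, max_length=max_length,
                         return_tensors="pt")

    batch = encode_clipped("a very long multimodal conversation ...")
    assert batch["input_ids"].shape[-1] <= 2048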
+ 92%|█████████▎| 481/520 [30:31<02:29, 3.83s/it] 93%|█████████▎| 482/520 [30:35<02:25, 3.84s/it] {'loss': 11.151, 'grad_norm': 1.0221094892104055e-05, 'learning_rate': 0.12564838356401475, 'epoch': 0.93} + 93%|█████████▎| 482/520 [30:35<02:25, 3.84s/it] 93%|█████████▎| 483/520 [30:39<02:22, 3.85s/it] {'loss': 10.6315, 'grad_norm': 1.1818145541740668e-05, 'learning_rate': 0.11915128232494493, 'epoch': 0.93} + 93%|█████████▎| 483/520 [30:39<02:22, 3.85s/it] 93%|█████████▎| 484/520 [30:43<02:18, 3.84s/it] {'loss': 11.0839, 'grad_norm': 1.1253983090771787e-05, 'learning_rate': 0.11282439518179371, 'epoch': 0.93} + 93%|█████████▎| 484/520 [30:43<02:18, 3.84s/it] 93%|█████████▎| 485/520 [30:47<02:14, 3.85s/it] {'loss': 10.5903, 'grad_norm': 1.2712930542443042e-05, 'learning_rate': 0.10666796796029987, 'epoch': 0.93} + 93%|█████████▎| 485/520 [30:47<02:14, 3.85s/it] 93%|█████████▎| 486/520 [30:51<02:11, 3.86s/it] {'loss': 10.4345, 'grad_norm': 1.4218536039045086e-05, 'learning_rate': 0.10068223986312957, 'epoch': 0.93} + 93%|█████████▎| 486/520 [30:51<02:11, 3.86s/it] 94%|█████████▎| 487/520 [30:55<02:08, 3.88s/it] {'loss': 10.4253, 'grad_norm': 1.414793065427798e-05, 'learning_rate': 0.09486744346058235, 'epoch': 0.94} + 94%|█████████▎| 487/520 [30:55<02:08, 3.88s/it] 94%|█████████▍| 488/520 [30:59<02:04, 3.89s/it] {'loss': 10.9303, 'grad_norm': 1.3189680914568165e-05, 'learning_rate': 0.08922380468155278, 'epoch': 0.94} + 94%|█████████▍| 488/520 [30:59<02:04, 3.89s/it] 94%|█████████▍| 489/520 [31:02<02:00, 3.87s/it] {'loss': 10.5967, 'grad_norm': 1.4314795888494878e-05, 'learning_rate': 0.08375154280475555, 'epoch': 0.94} + 94%|█████████▍| 489/520 [31:02<02:00, 3.87s/it] 94%|█████████▍| 490/520 [31:06<01:56, 3.88s/it] {'loss': 10.6555, 'grad_norm': 1.3567454549576009e-05, 'learning_rate': 0.07845087045020277, 'epoch': 0.94} + 94%|█████████▍| 490/520 [31:06<01:56, 3.88s/it] 94%|█████████▍| 491/520 [31:10<01:52, 3.88s/it] {'loss': 10.8198, 'grad_norm': 1.3288390191263291e-05, 'learning_rate': 0.07332199357094404, 'epoch': 0.94} + 94%|█████████▍| 491/520 [31:10<01:52, 3.88s/it] 95%|█████████▍| 492/520 [31:14<01:48, 3.86s/it] {'loss': 10.6559, 'grad_norm': 1.4148694503839379e-05, 'learning_rate': 0.06836511144506391, 'epoch': 0.95} + 95%|█████████▍| 492/520 [31:14<01:48, 3.86s/it] 95%|█████████▍| 493/520 [31:18<01:44, 3.86s/it] {'loss': 11.5756, 'grad_norm': 1.181149974134144e-05, 'learning_rate': 0.06358041666793851, 'epoch': 0.95} + 95%|█████████▍| 493/520 [31:18<01:44, 3.86s/it] 95%|█████████▌| 494/520 [31:22<01:40, 3.87s/it] {'loss': 10.6113, 'grad_norm': 1.4622304848348117e-05, 'learning_rate': 0.058968095144755095, 'epoch': 0.95} + 95%|█████████▌| 494/520 [31:22<01:40, 3.87s/it] 95%|█████████▌| 495/520 [31:26<01:37, 3.89s/it] {'loss': 10.1873, 'grad_norm': 1.788727552694343e-05, 'learning_rate': 0.054528326083283785, 'epoch': 0.95} + 95%|█████████▌| 495/520 [31:26<01:37, 3.89s/it] 95%|█████████▌| 496/520 [31:30<01:33, 3.88s/it] {'loss': 10.7533, 'grad_norm': 1.548930183225642e-05, 'learning_rate': 0.050261281986921647, 'epoch': 0.95} + 95%|█████████▌| 496/520 [31:30<01:33, 3.88s/it] 96%|█████████▌| 497/520 [31:33<01:29, 3.88s/it] {'loss': 10.7022, 'grad_norm': 1.4990337147354568e-05, 'learning_rate': 0.04616712864798306, 'epoch': 0.96} + 96%|█████████▌| 497/520 [31:33<01:29, 3.88s/it] 96%|█████████▌| 498/520 [31:37<01:25, 3.88s/it] {'loss': 10.378, 'grad_norm': 1.7280027798021525e-05, 'learning_rate': 0.042246025141262356, 'epoch': 0.96} + 96%|█████████▌| 498/520 [31:37<01:25, 3.88s/it] 
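The learning_rate column in these records traces the cosine schedule requested by --lr_scheduler_type cosine with --warmup_ratio 0.03: roughly 66.5k sampled conversations (--train_data_ratio 0.1) at a global batch of 4 per device x 4 accumulation steps x 8 ranks = 128 give the 520 optimizer steps shown, ceil(0.03 x 520) = 16 of them warmup. A short check of the closed form against the logged values, assuming the get_cosine_schedule_with_warmup semantics of the Hugging Face Trainer and a peak learning rate of 9.0 — the value that reproduces this trace, consistent with the "_9_" ablation named in the completion banner below:

    import math

    # Assumed from the run shown here: 520 total optimizer steps,
    # ceil(0.03 * 520) = 16 warmup steps, peak LR 9.0.
    TOTAL, WARMUP, PEAK = 520, 16, 9.0

    def cosine_lr(step: int) -> float:
        # Closed form of transformers.get_cosine_schedule_with_warmup
        # with num_cycles = 0.5 (the Trainer default for "cosine").
        if step < WARMUP:
            return PEAK * step / max(1, WARMUP)
        progress = (step - WARMUP) / max(1, TOTAL - WARMUP)
        return PEAK * 0.5 * (1.0 + math.cos(math.pi * progress))

    print(cosine_lr(499))  # ~0.038498  (logged at step 499: 0.03849812381785328)
    print(cosine_lr(519))  # ~8.742e-05 (logged at step 519: 8.742180807813638e-05)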
96%|█████████▌| 499/520 [31:41<01:21, 3.88s/it] {'loss': 11.2919, 'grad_norm': 1.3406799609220637e-05, 'learning_rate': 0.03849812381785328, 'epoch': 0.96} + 96%|█████████▌| 499/520 [31:41<01:21, 3.88s/it] 96%|█████████▌| 500/520 [31:45<01:17, 3.87s/it] {'loss': 11.0191, 'grad_norm': 1.408770020652023e-05, 'learning_rate': 0.034923570299225715, 'epoch': 0.96} + 96%|█████████▌| 500/520 [31:45<01:17, 3.87s/it] 96%|█████████▋| 501/520 [31:49<01:13, 3.87s/it] {'loss': 11.3244, 'grad_norm': 1.3596797334225001e-05, 'learning_rate': 0.031522503471571706, 'epoch': 0.96} + 96%|█████████▋| 501/520 [31:49<01:13, 3.87s/it] 97%|█████████▋| 502/520 [31:53<01:09, 3.88s/it] {'loss': 10.4679, 'grad_norm': 1.7990662424937947e-05, 'learning_rate': 0.028295055480408282, 'epoch': 0.97} + 97%|█████████▋| 502/520 [31:53<01:09, 3.88s/it] 97%|█████████▋| 503/520 [31:57<01:06, 3.89s/it] {'loss': 10.991, 'grad_norm': 1.4798294184200137e-05, 'learning_rate': 0.025241351725441064, 'epoch': 0.97} + 97%|█████████▋| 503/520 [31:57<01:06, 3.89s/it] 97%|█████████▋| 504/520 [32:01<01:02, 3.88s/it] {'loss': 11.4824, 'grad_norm': 1.5538755589737224e-05, 'learning_rate': 0.022361510855693656, 'epoch': 0.97} + 97%|█████████▋| 504/520 [32:01<01:02, 3.88s/it] 97%|█████████▋| 505/520 [32:04<00:58, 3.88s/it] {'loss': 10.7148, 'grad_norm': 1.670893198363698e-05, 'learning_rate': 0.01965564476489784, 'epoch': 0.97} + 97%|█████████▋| 505/520 [32:04<00:58, 3.88s/it] 97%|█████████▋| 506/520 [32:08<00:54, 3.87s/it] {'loss': 10.4838, 'grad_norm': 1.793202552072829e-05, 'learning_rate': 0.017123858587145047, 'epoch': 0.97} + 97%|█████████▋| 506/520 [32:08<00:54, 3.87s/it] 98%|█████████▊| 507/520 [32:12<00:50, 3.86s/it] {'loss': 11.3609, 'grad_norm': 1.5709232947011383e-05, 'learning_rate': 0.01476625069280213, 'epoch': 0.97} + 98%|█████████▊| 507/520 [32:12<00:50, 3.86s/it] 98%|█████████▊| 508/520 [32:16<00:45, 3.80s/it] {'loss': 10.7112, 'grad_norm': 1.8044137555576435e-05, 'learning_rate': 0.012582912684689418, 'epoch': 0.98} + 98%|█████████▊| 508/520 [32:16<00:45, 3.80s/it] 98%|█████████▊| 509/520 [32:20<00:41, 3.79s/it] {'loss': 10.3162, 'grad_norm': 2.077198444846192e-05, 'learning_rate': 0.010573929394520065, 'epoch': 0.98} + 98%|█████████▊| 509/520 [32:20<00:41, 3.79s/it] 98%|█████████▊| 510/520 [32:23<00:37, 3.76s/it] {'loss': 10.6964, 'grad_norm': 1.8008875650455897e-05, 'learning_rate': 0.008739378879606685, 'epoch': 0.98} + 98%|█████████▊| 510/520 [32:23<00:37, 3.76s/it] 98%|█████████▊| 511/520 [32:27<00:33, 3.74s/it] {'loss': 10.583, 'grad_norm': 1.9047870673158085e-05, 'learning_rate': 0.007079332419825279, 'epoch': 0.98} + 98%|█████████▊| 511/520 [32:27<00:33, 3.74s/it] 98%|█████████▊| 512/520 [32:31<00:29, 3.72s/it] {'loss': 10.3516, 'grad_norm': 1.9971661092992888e-05, 'learning_rate': 0.00559385451484945, 'epoch': 0.98} + 98%|█████████▊| 512/520 [32:31<00:29, 3.72s/it] 99%|█████████▊| 513/520 [32:34<00:25, 3.71s/it] {'loss': 10.7796, 'grad_norm': 1.7621586604435836e-05, 'learning_rate': 0.004283002881639908, 'epoch': 0.99} + 99%|█████████▊| 513/520 [32:34<00:25, 3.71s/it] 99%|█████████▉| 514/520 [32:38<00:22, 3.70s/it] {'loss': 10.6352, 'grad_norm': 1.9382300998116753e-05, 'learning_rate': 0.003146828452206263, 'epoch': 0.99} + 99%|█████████▉| 514/520 [32:38<00:22, 3.70s/it] 99%|█████████▉| 515/520 [32:42<00:18, 3.69s/it] {'loss': 10.8605, 'grad_norm': 1.822149338004051e-05, 'learning_rate': 0.0021853753716256086, 'epoch': 0.99} + 99%|█████████▉| 515/520 [32:42<00:18, 3.69s/it] 99%|█████████▉| 516/520 [32:45<00:14, 
3.69s/it] {'loss': 10.9836, 'grad_norm': 1.690386717749524e-05, 'learning_rate': 0.0013986809963268954, 'epoch': 0.99} + 99%|█████████▉| 516/520 [32:45<00:14, 3.69s/it] 99%|█████████▉| 517/520 [32:49<00:11, 3.69s/it] {'loss': 11.1033, 'grad_norm': 1.6629686431711642e-05, 'learning_rate': 0.0007867758926410895, 'epoch': 0.99} + 99%|█████████▉| 517/520 [32:49<00:11, 3.69s/it] 100%|█████████▉| 518/520 [32:53<00:07, 3.68s/it] {'loss': 10.8292, 'grad_norm': 1.784087281264796e-05, 'learning_rate': 0.00034968383561312377, 'epoch': 1.0} + 100%|█████████▉| 518/520 [32:53<00:07, 3.68s/it] 100%|█████████▉| 519/520 [32:56<00:03, 3.69s/it] {'loss': 11.1868, 'grad_norm': 1.567678527197799e-05, 'learning_rate': 8.742180807813638e-05, 'epoch': 1.0} + 100%|█████████▉| 519/520 [32:56<00:03, 3.69s/it] 100%|██████████| 520/520 [33:01<00:00, 3.93s/it] {'loss': 10.9497, 'grad_norm': 1.6669081982774498e-05, 'learning_rate': 0.0, 'epoch': 1.0} + 100%|██████████| 520/520 [33:01<00:00, 3.93s/it] {'train_runtime': 1981.4255, 'train_samples_per_second': 33.576, 'train_steps_per_second': 0.262, 'train_loss': 10.849154564967522, 'epoch': 1.0} + 100%|██████████| 520/520 [33:01<00:00, 3.93s/it] 100%|██████████| 520/520 [33:01<00:00, 3.81s/it] +[2025-10-09 07:32:47,020] [INFO] [launch.py:348:main] Process 867096 exits successfully. +[2025-10-09 07:32:48,020] [INFO] [launch.py:348:main] Process 867097 exits successfully. +[2025-10-09 07:32:48,021] [INFO] [launch.py:348:main] Process 867094 exits successfully. +[2025-10-09 07:32:48,021] [INFO] [launch.py:348:main] Process 867093 exits successfully. +[2025-10-09 07:32:48,021] [INFO] [launch.py:348:main] Process 867092 exits successfully. +[2025-10-09 07:32:48,022] [INFO] [launch.py:348:main] Process 867098 exits successfully. +[2025-10-09 07:32:49,023] [INFO] [launch.py:348:main] Process 867095 exits successfully. +[2025-10-09 07:32:52,027] [INFO] [launch.py:348:main] Process 867091 exits successfully. +==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9_connector-3.0_0.5_9_ablation_20251009_065816.log +Timestamp: 2025-10-09 07:32:54 +===================================== diff --git a/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation_20251009_102218.log b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation_20251009_102218.log new file mode 100644 index 0000000000000000000000000000000000000000..9b5513edbd8741b18fe6603ed6fce308c9189d74 --- /dev/null +++ b/logs_oct9/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation_20251009_102218.log @@ -0,0 +1,2312 @@ +==== STARTING EXPERIMENT: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation ==== +Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation_20251009_102218.log +Timestamp: 2025-10-09 10:22:18 +===================================== +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 10:22:20,878] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:23,567] [WARNING] [runner.py:202:fetch_hostfile] Unable to find hostfile, will proceed with training with local resources only. +[2025-10-09 10:22:23,569] [INFO] [runner.py:568:main] cmd = /opt/conda/envs/tinyllava/bin/python3.10 -u -m deepspeed.launcher.launch --world_info=eyJsb2NhbGhvc3QiOiBbMCwgMSwgMiwgMywgNCwgNSwgNiwgN119 --master_addr=127.0.0.1 --master_port=29501 --enable_each_rank_log=None tinyllava/train/train.py --deepspeed ./scripts/zero3.json --data_path /root/dataset/text_files/llava_v1_5_mix665k.json --image_folder /root/dataset --is_multimodal True --conv_version qwen2_base --model_name_or_path Qwen/Qwen2.5-0.5B --vision_tower google/siglip-so400m-patch14-384 --vision_tower2 --connector_type mlp2x_gelu --mm_vision_select_layer -2 --image_aspect_ratio square --attn_implementation flash_attention_2 --bf16 True --training_recipe common --tune_type_llm full --tune_type_vision_tower frozen --tune_vision_tower_from_layer 0 --tune_type_connector full --group_by_modality_length True --pretrained_model_path /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain --output_dir /nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation --num_train_epochs 1 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --gradient_accumulation_steps 4 --evaluation_strategy no --learning_rate 9e-1 --weight_decay 0. --warmup_ratio 0.03 --lr_scheduler_type cosine --logging_steps 1 --tf32 False --model_max_length 2048 --gradient_checkpointing True --dataloader_num_workers 8 --lazy_preprocess True --report_to tensorboard --tokenizer_use_fast False --run_name tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune --subnet_mode_text both --subnet_type_text None --mask_type_text soft --init_mean_text 3.0 --temperature_attn_text 0.5 --temperature_mlp_text 0.5 --backward_type_text normal --masked_layers_text all --subnet_mode_vision both --subnet_type_vision None --mask_type_vision soft --init_mean_vision 3.0 --temperature_attn_vision 0.5 --temperature_mlp_vision 0.5 --backward_type_vision normal --masked_layers_vision all --subnet_type_connector global --mask_type_connector soft --init_mean_connector 3.0 --temperature_connector 0.5 --backward_type_connector normal --mm_projector_lr 9e-1 --seed 42 --mask_model llm-connector --save_strategy steps --save_steps 50000 --save_total_limit 1 --train_data_ratio 0.1 +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 10:22:26,217] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:27,250] [INFO] [launch.py:138:main] 0 NCCL_VERSION=2.21.5 +[2025-10-09 10:22:27,250] [INFO] [launch.py:138:main] 0 NCCL_SOCKET_IFNAME=eth +[2025-10-09 10:22:27,250] [INFO] [launch.py:145:main] WORLD INFO DICT: {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]} +[2025-10-09 10:22:27,250] [INFO] [launch.py:151:main] nnodes=1, num_local_procs=8, node_rank=0 +[2025-10-09 10:22:27,250] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'localhost': [0, 1, 2, 3, 4, 5, 6, 7]}) +[2025-10-09 10:22:27,250] [INFO] [launch.py:163:main] dist_world_size=8 +[2025-10-09 10:22:27,250] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +[2025-10-09 10:22:27,252] [INFO] [launch.py:253:main] process 1158262 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=0', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 10:22:27,254] [INFO] [launch.py:253:main] process 1158263 spawned with
command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=1', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 10:22:27,256] [INFO] [launch.py:253:main] process 1158264 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=2', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', 
'/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 10:22:27,258] [INFO] [launch.py:253:main] process 1158265 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=3', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', 
'--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 10:22:27,260] [INFO] [launch.py:253:main] process 1158266 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=4', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 10:22:27,262] [INFO] [launch.py:253:main] process 1158267 spawned with command: 
['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=5', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 10:22:27,264] [INFO] [launch.py:253:main] process 1158268 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=6', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', 
'--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', '--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +[2025-10-09 10:22:27,266] [INFO] [launch.py:253:main] process 1158269 spawned with command: ['/opt/conda/envs/tinyllava/bin/python3.10', '-u', 'tinyllava/train/train.py', '--local_rank=7', '--deepspeed', './scripts/zero3.json', '--data_path', '/root/dataset/text_files/llava_v1_5_mix665k.json', '--image_folder', '/root/dataset', '--is_multimodal', 'True', '--conv_version', 'qwen2_base', '--model_name_or_path', 'Qwen/Qwen2.5-0.5B', '--vision_tower', 'google/siglip-so400m-patch14-384', '--vision_tower2', '', '--connector_type', 'mlp2x_gelu', '--mm_vision_select_layer', '-2', '--image_aspect_ratio', 'square', '--attn_implementation', 'flash_attention_2', '--bf16', 'True', '--training_recipe', 'common', '--tune_type_llm', 'full', '--tune_type_vision_tower', 'frozen', '--tune_vision_tower_from_layer', '0', '--tune_type_connector', 'full', '--group_by_modality_length', 'True', '--pretrained_model_path', '/nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain', '--output_dir', '/nfs/ywang29/TinyLLaVA/checkpoints/qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation', '--num_train_epochs', '1', '--per_device_train_batch_size', '4', '--per_device_eval_batch_size', '4', '--gradient_accumulation_steps', '4', '--evaluation_strategy', 'no', '--learning_rate', '9e-1', '--weight_decay', '0.', '--warmup_ratio', '0.03', '--lr_scheduler_type', 'cosine', '--logging_steps', '1', '--tf32', 'False', '--model_max_length', '2048', '--gradient_checkpointing', 'True', '--dataloader_num_workers', '8', '--lazy_preprocess', 'True', '--report_to', 'tensorboard', '--tokenizer_use_fast', 'False', '--run_name', 'tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-qwen2.5-0_5b_base-masktune', '--subnet_mode_text', 'both', '--subnet_type_text', 'None', '--mask_type_text', 'soft', '--init_mean_text', '3.0', '--temperature_attn_text', '0.5', 
'--temperature_mlp_text', '0.5', '--backward_type_text', 'normal', '--masked_layers_text', 'all', '--subnet_mode_vision', 'both', '--subnet_type_vision', 'None', '--mask_type_vision', 'soft', '--init_mean_vision', '3.0', '--temperature_attn_vision', '0.5', '--temperature_mlp_vision', '0.5', '--backward_type_vision', 'normal', '--masked_layers_vision', 'all', '--subnet_type_connector', 'global', '--mask_type_connector', 'soft', '--init_mean_connector', '3.0', '--temperature_connector', '0.5', '--backward_type_connector', 'normal', '--mm_projector_lr', '9e-1', '--seed', '42', '--mask_model', 'llm-connector', '--save_strategy', 'steps', '--save_steps', '50000', '--save_total_limit', '1', '--train_data_ratio', '0.1'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. + import pynvml # type: ignore[import] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/cuda/__init__.py:51: FutureWarning: The pynvml package is deprecated. Please install nvidia-ml-py instead. If you did not install pynvml directly, please report this to the maintainers of the package that installed pynvml for you. 
+ import pynvml # type: ignore[import] +[2025-10-09 10:22:33,833] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:34,100] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:34,137] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:34,165] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:34,188] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:34,188] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:34,192] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:34,197] [INFO] [real_accelerator.py:191:get_accelerator] Setting ds_accelerator to cuda (auto detect) +[2025-10-09 10:22:34,250] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 10:22:34,523] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 10:22:34,523] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl +[2025-10-09 10:22:34,556] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 10:22:34,590] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 10:22:34,606] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 10:22:34,607] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 10:22:34,610] [INFO] [comm.py:637:init_distributed] cdb=None +[2025-10-09 10:22:34,611] [INFO] [comm.py:637:init_distributed] cdb=None +Apply masks for the following modules: ['llm', 'connector'] +{'llm': {'model_name_or_path': 'Qwen/Qwen2.5-0.5B', 'cache_dir': None, 'attn_implementation': 'flash_attention_2', 'subnet_mode': 'both', 'subnet_type': 'None', 'sparsity_attn': None, 'sparsity_mlp': None, 'threshold_attn': None, 'threshold_mlp': None, 'temperature_attn': 0.5, 'temperature_mlp': 0.5, 'masked_layers': 'all', 'mask_type': 'soft', 'backward_type': 'normal'}, 'vision_tower': {'model_name_or_path': 'google/siglip-so400m-patch14-384'}, 'connector': {'connector_type': 'mlp2x_gelu', 'subnet_type': 'global', 'threshold': None, 'sparsity': None, 'temperature': 0.5, 'mask_type': 'soft', 'backward_type': 'normal'}} +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. + warnings.warn( +Apply masks for the following modules: ['llm', 'connector'] +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/huggingface_hub/file_download.py:945: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. If you want to force a new download, use `force_download=True`. 
+ warnings.warn( +TinyLlavaConfig { + "backward_type_connector": "normal", + "cache_dir": null, + "connector_type": "mlp2x_gelu", + "hidden_size": 896, + "ignore_index": -100, + "image_aspect_ratio": "square", + "image_token_index": -200, + "llm_model_name_or_path": "Qwen/Qwen2.5-0.5B", + "mask_model": [ + "llm", + "connector" + ], + "mask_type_connector": "soft", + "model_type": "tinyllava", + "num_queries": 128, + "num_resampler_layers": 3, + "pad_token": null, + "resampler_hidden_size": 768, + "sparsity_connector": null, + "subnet_type_connector": "global", + "temperature_connector": 0.5, + "text_config": { + "_name_or_path": "Qwen/Qwen2.5-0.5B", + "architectures": [ + "Qwen2ForCausalLM" + ], + "backward_type": "normal", + "bos_token_id": 151643, + "eos_token_id": 151643, + "hidden_size": 896, + "intermediate_size": 4864, + "mask_type": "soft", + "masked_layers": "all", + "max_position_embeddings": 32768, + "max_window_layers": 24, + "model_type": "qwen2", + "num_attention_heads": 14, + "num_hidden_layers": 24, + "num_key_value_heads": 2, + "rope_theta": 1000000.0, + "sliding_window": 32768, + "subnet_mode": "both", + "subnet_type": "None", + "temperature_attn": 0.5, + "temperature_mlp": 0.5, + "tie_word_embeddings": true, + "torch_dtype": "bfloat16", + "use_mrope": false, + "use_sliding_window": false, + "vocab_size": 151936 + }, + "threshold_connector": null, + "tokenizer_model_max_length": 2048, + "tokenizer_name_or_path": "Qwen/Qwen2.5-0.5B", + "tokenizer_padding_side": "right", + "tokenizer_use_fast": false, + "transformers_version": "4.40.1", + "tune_type_connector": "frozen", + "tune_type_llm": "frozen", + "tune_type_vision_tower": "frozen", + "tune_vision_tower_from_layer": -1, + "use_cache": false, + "vision_config": { + "hidden_act": "gelu_pytorch_tanh", + "hidden_size": 1152, + "image_size": 384, + "intermediate_size": 4304, + "layer_norm_eps": 1e-06, + "model_name_or_path": "google/siglip-so400m-patch14-384", + "model_name_or_path2": "", + "model_type": "siglip_vision_model", + "num_attention_heads": 16, + "num_hidden_layers": 27, + "patch_size": 14 + }, + "vision_feature_layer": -2, + "vision_feature_select_strategy": "patch", + "vision_hidden_size": 1152, + "vision_model_name_or_path": "google/siglip-so400m-patch14-384", + "vision_model_name_or_path2": "", + "vocab_size": 151936 +} + +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. 
To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1158262:1158262 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158262:1158262 [0] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158262:1158262 [0] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1158262:1158262 [0] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1158262:1158262 [0] NCCL INFO NET/Plugin: Using internal network plugin. 
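For reference, the TinyLlavaConfig block above is plain JSON once the leading class name is stripped, so the ablation-relevant fields can be pulled out with the standard library alone. A small sketch, with a hypothetical file name:

```python
import json

# Hypothetical file holding the JSON body of the TinyLlavaConfig dump above
with open("tinyllava_config.json") as f:
    cfg = json.load(f)

print(cfg["mask_model"])                        # ['llm', 'connector']
print(cfg["subnet_type_connector"])             # 'global'
print(cfg["text_config"]["temperature_attn"])   # 0.5
print(cfg["text_config"]["num_hidden_layers"])  # 24
```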
+ywang29-vrdb-test1-worker-0:1158262:1158262 [0] NCCL INFO cudaDriverVersion 12040 +NCCL version 2.21.5+cuda12.1 +Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained. +/opt/conda/envs/tinyllava/lib/python3.10/site-packages/torch/_utils.py:831: UserWarning: TypedStorage is deprecated. It will be removed in the future and UntypedStorage will be the only storage class. This should only matter to you if you are using storages directly. To access UntypedStorage directly, use tensor.untyped_storage() instead of tensor.storage() + return self.fget.__get__(instance, owner)() +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1158266:1158266 [4] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1158266:1158266 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158266:1158266 [4] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158266:1158266 [4] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1158266:1158266 [4] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1158266:1158266 [4] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1158263:1158263 [1] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1158263:1158263 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158263:1158263 [1] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158263:1158263 [1] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1158263:1158263 [1] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1158263:1158263 [1] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. 
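The bootstrap lines above say NCCL found no InfiniBand device and fell back to plain TCP sockets on eth0, with the interface pinned through NCCL_SOCKET_IFNAME; the verbosity itself comes from NCCL's debug logging. Both are real NCCL environment variables, and the values below simply mirror what this log reports:

```python
import os

# Pin NCCL to the eth* interfaces ("NCCL_SOCKET_IFNAME set by environment to eth")
os.environ["NCCL_SOCKET_IFNAME"] = "eth"
# NCCL_DEBUG=INFO is what produces the verbose "NCCL INFO" lines in this log
os.environ["NCCL_DEBUG"] = "INFO"
```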
+ywang29-vrdb-test1-worker-0:1158269:1158269 [7] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1158269:1158269 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158269:1158269 [7] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158269:1158269 [7] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1158269:1158269 [7] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1158269:1158269 [7] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1158264:1158264 [2] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1158264:1158264 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158264:1158264 [2] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158264:1158264 [2] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1158264:1158264 [2] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1158264:1158264 [2] NCCL INFO NET/Plugin: Using internal network plugin. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1158265:1158265 [3] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1158265:1158265 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158265:1158265 [3] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158265:1158265 [3] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1158265:1158265 [3] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1158265:1158265 [3] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1158268:1158268 [6] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1158268:1158268 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158268:1158268 [6] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158268:1158268 [6] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1158268:1158268 [6] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1158268:1158268 [6] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Using network Socket +You are attempting to use Flash Attention 2.0 with a model not initialized on GPU. Make sure to move the model to GPU after initializing it on CPU with `model.to('cuda')`. +ywang29-vrdb-test1-worker-0:1158267:1158267 [5] NCCL INFO cudaDriverVersion 12040 +ywang29-vrdb-test1-worker-0:1158267:1158267 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158267:1158267 [5] NCCL INFO Bootstrap : Using eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158267:1158267 [5] NCCL INFO NET/Plugin: No plugin found (libnccl-net.so) +ywang29-vrdb-test1-worker-0:1158267:1158267 [5] NCCL INFO NET/Plugin: Plugin load returned 2 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-net.so +ywang29-vrdb-test1-worker-0:1158267:1158267 [5] NCCL INFO NET/Plugin: Using internal network plugin. +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO NET/IB : No device found. 
+ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO NET/IB : No device found. +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO NCCL_SOCKET_IFNAME set by environment to eth +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO NET/Socket : Using [0]eth0:10.200.186.45<0> +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Using non-device net plugin version 0 +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Using network Socket +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO ncclCommInitRank comm 0x561c4a4adcd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xbd9b48e343915a1d - Init START +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO ncclCommInitRank comm 0x560a17691540 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xbd9b48e343915a1d - Init START +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO ncclCommInitRank comm 0x55dcf3a644c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xbd9b48e343915a1d - Init START +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO ncclCommInitRank comm 0x55a116993910 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xbd9b48e343915a1d - Init START +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO ncclCommInitRank comm 0x5597c9289740 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xbd9b48e343915a1d - Init START +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO ncclCommInitRank comm 0x55ff54c2cc60 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xbd9b48e343915a1d - Init START +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO ncclCommInitRank comm 0x5628e84ce3e0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xbd9b48e343915a1d - Init START +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO ncclCommInitRank comm 0x5641883eeeb0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xbd9b48e343915a1d - Init START +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Setting affinity for GPU 7 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO NVLS multicast support is not available on dev 7 +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Setting affinity for GPU 1 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO NVLS multicast support is not available on dev 1 +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Setting affinity for GPU 6 to ffffff00,0000ffff,ff000000 
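The affinity lines print hex CPU masks, and only two distinct masks appear: one for GPUs 0-3 and its complement for GPUs 4-7, consistent with a dual-socket, 96-thread host where each GPU is pinned to the threads of its local NUMA node. A short sketch decoding the two masks from the log:

```python
def cpus_from_mask(mask: str) -> list[int]:
    # NCCL prints the affinity as comma-separated hex words; bit i = CPU i
    bits = int(mask.replace(",", ""), 16)
    return [i for i in range(bits.bit_length()) if (bits >> i) & 1]

print(cpus_from_mask("ff,ffff0000,00ffffff"))        # CPUs 0-23 and 48-71 (GPUs 0-3)
print(cpus_from_mask("ffffff00,0000ffff,ff000000"))  # CPUs 24-47 and 72-95 (GPUs 4-7)
```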
+ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO NVLS multicast support is not available on dev 6 +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Setting affinity for GPU 3 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO NVLS multicast support is not available on dev 3 +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Setting affinity for GPU 4 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO NVLS multicast support is not available on dev 4 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Setting affinity for GPU 0 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO NVLS multicast support is not available on dev 0 +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Setting affinity for GPU 5 to ffffff00,0000ffff,ff000000 +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO NVLS multicast support is not available on dev 5 +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Setting affinity for GPU 2 to ff,ffff0000,00ffffff +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO NVLS multicast support is not available on dev 2 +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO comm 0x55a116993910 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO comm 0x560a17691540 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO comm 0x55dcf3a644c0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO comm 0x5641883eeeb0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO comm 0x561c4a4adcd0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO comm 0x55ff54c2cc60 rank 1 nRanks 8 nNodes 1 localRanks 8 localRank 1 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO comm 0x5628e84ce3e0 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO comm 0x5597c9289740 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 
6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 
+ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] -1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read 
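All 24 channels enumerate the same ring order 0 1 2 3 4 5 6 7, and the connection lines that follow are each rank linking to (rank + 1) mod 8, which is why rank 7 wraps back to rank 0. A tiny sketch reproducing the forward neighbor map:

```python
nranks = 8  # one single-node communicator over 8 GPUs, per ncclCommInitRank above
for rank in range(nranks):
    nxt = (rank + 1) % nranks
    print(f"{rank}[{rank}] -> {nxt}[{nxt}]")  # matches the "via P2P/CUMEM/read" lines
```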
+ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL 
INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL 
INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL 
INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
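Once every rank has logged "Connected all rings", the remaining P2P setup runs the same chain in the reverse direction (rank k to rank k-1). That matches the Trees entries earlier, where each rank's line reads child/-1/-1->rank->parent along a chain rooted at rank 0. A sketch reproducing those entries:

```python
nranks = 8
for rank in range(nranks):
    child = rank + 1 if rank + 1 < nranks else -1  # -1: rank 7 has no child
    parent = rank - 1                              # -1: rank 0 is the root
    print(f"[{rank}] Trees {child}/-1/-1->{rank}->{parent}")
```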
+ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL 
INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL 
INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 
512 | 512 +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1158269:1159852 [7] NCCL INFO ncclCommInitRank comm 0x5597c9289740 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xbd9b48e343915a1d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158267:1159872 [5] NCCL INFO ncclCommInitRank comm 0x560a17691540 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xbd9b48e343915a1d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1158263:1159851 [1] NCCL INFO ncclCommInitRank comm 0x55ff54c2cc60 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xbd9b48e343915a1d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1158265:1159861 [3] NCCL INFO ncclCommInitRank comm 0x561c4a4adcd0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xbd9b48e343915a1d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO TUNER/Plugin: Using internal tuner plugin. 
+ywang29-vrdb-test1-worker-0:1158268:1159868 [6] NCCL INFO ncclCommInitRank comm 0x55dcf3a644c0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xbd9b48e343915a1d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1158266:1159850 [4] NCCL INFO ncclCommInitRank comm 0x5641883eeeb0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xbd9b48e343915a1d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1158264:1159853 [2] NCCL INFO ncclCommInitRank comm 0x55a116993910 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xbd9b48e343915a1d - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO TUNER/Plugin: Plugin load returned 11 : libnccl-net.so: cannot open shared object file: No such file or directory : when loading libnccl-tuner.so +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO TUNER/Plugin: Using internal tuner plugin. +ywang29-vrdb-test1-worker-0:1158262:1159849 [0] NCCL INFO ncclCommInitRank comm 0x5628e84ce3e0 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xbd9b48e343915a1d - Init COMPLETE +[2025-10-09 10:23:22,846] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 459, num_elems = 0.99B +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 
'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 
'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. 
+Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores',
'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 
'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +Some weights of Qwen2ForCausalLM were not initialized from the model checkpoint at /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model and are newly initialized: ['model.layers.0.mlp.down_proj.scores', 'model.layers.0.mlp.gate_proj.scores', 'model.layers.0.mlp.up_proj.scores', 'model.layers.0.self_attn.k_proj.scores', 'model.layers.0.self_attn.o_proj.scores', 'model.layers.0.self_attn.q_proj.scores', 'model.layers.0.self_attn.v_proj.scores', 'model.layers.1.mlp.down_proj.scores', 'model.layers.1.mlp.gate_proj.scores', 'model.layers.1.mlp.up_proj.scores', 'model.layers.1.self_attn.k_proj.scores', 'model.layers.1.self_attn.o_proj.scores', 'model.layers.1.self_attn.q_proj.scores', 'model.layers.1.self_attn.v_proj.scores', 'model.layers.10.mlp.down_proj.scores', 'model.layers.10.mlp.gate_proj.scores', 'model.layers.10.mlp.up_proj.scores', 'model.layers.10.self_attn.k_proj.scores', 'model.layers.10.self_attn.o_proj.scores', 'model.layers.10.self_attn.q_proj.scores', 'model.layers.10.self_attn.v_proj.scores', 'model.layers.11.mlp.down_proj.scores', 'model.layers.11.mlp.gate_proj.scores', 'model.layers.11.mlp.up_proj.scores', 'model.layers.11.self_attn.k_proj.scores', 'model.layers.11.self_attn.o_proj.scores', 'model.layers.11.self_attn.q_proj.scores', 'model.layers.11.self_attn.v_proj.scores', 'model.layers.12.mlp.down_proj.scores', 'model.layers.12.mlp.gate_proj.scores', 'model.layers.12.mlp.up_proj.scores', 'model.layers.12.self_attn.k_proj.scores', 'model.layers.12.self_attn.o_proj.scores', 'model.layers.12.self_attn.q_proj.scores', 'model.layers.12.self_attn.v_proj.scores', 'model.layers.13.mlp.down_proj.scores', 'model.layers.13.mlp.gate_proj.scores', 'model.layers.13.mlp.up_proj.scores', 'model.layers.13.self_attn.k_proj.scores', 'model.layers.13.self_attn.o_proj.scores', 'model.layers.13.self_attn.q_proj.scores', 'model.layers.13.self_attn.v_proj.scores', 'model.layers.14.mlp.down_proj.scores', 'model.layers.14.mlp.gate_proj.scores', 'model.layers.14.mlp.up_proj.scores', 'model.layers.14.self_attn.k_proj.scores', 'model.layers.14.self_attn.o_proj.scores', 'model.layers.14.self_attn.q_proj.scores', 'model.layers.14.self_attn.v_proj.scores', 'model.layers.15.mlp.down_proj.scores', 'model.layers.15.mlp.gate_proj.scores', 'model.layers.15.mlp.up_proj.scores', 'model.layers.15.self_attn.k_proj.scores', 'model.layers.15.self_attn.o_proj.scores', 'model.layers.15.self_attn.q_proj.scores', 'model.layers.15.self_attn.v_proj.scores', 'model.layers.16.mlp.down_proj.scores', 'model.layers.16.mlp.gate_proj.scores', 'model.layers.16.mlp.up_proj.scores', 
'model.layers.16.self_attn.k_proj.scores', 'model.layers.16.self_attn.o_proj.scores', 'model.layers.16.self_attn.q_proj.scores', 'model.layers.16.self_attn.v_proj.scores', 'model.layers.17.mlp.down_proj.scores', 'model.layers.17.mlp.gate_proj.scores', 'model.layers.17.mlp.up_proj.scores', 'model.layers.17.self_attn.k_proj.scores', 'model.layers.17.self_attn.o_proj.scores', 'model.layers.17.self_attn.q_proj.scores', 'model.layers.17.self_attn.v_proj.scores', 'model.layers.18.mlp.down_proj.scores', 'model.layers.18.mlp.gate_proj.scores', 'model.layers.18.mlp.up_proj.scores', 'model.layers.18.self_attn.k_proj.scores', 'model.layers.18.self_attn.o_proj.scores', 'model.layers.18.self_attn.q_proj.scores', 'model.layers.18.self_attn.v_proj.scores', 'model.layers.19.mlp.down_proj.scores', 'model.layers.19.mlp.gate_proj.scores', 'model.layers.19.mlp.up_proj.scores', 'model.layers.19.self_attn.k_proj.scores', 'model.layers.19.self_attn.o_proj.scores', 'model.layers.19.self_attn.q_proj.scores', 'model.layers.19.self_attn.v_proj.scores', 'model.layers.2.mlp.down_proj.scores', 'model.layers.2.mlp.gate_proj.scores', 'model.layers.2.mlp.up_proj.scores', 'model.layers.2.self_attn.k_proj.scores', 'model.layers.2.self_attn.o_proj.scores', 'model.layers.2.self_attn.q_proj.scores', 'model.layers.2.self_attn.v_proj.scores', 'model.layers.20.mlp.down_proj.scores', 'model.layers.20.mlp.gate_proj.scores', 'model.layers.20.mlp.up_proj.scores', 'model.layers.20.self_attn.k_proj.scores', 'model.layers.20.self_attn.o_proj.scores', 'model.layers.20.self_attn.q_proj.scores', 'model.layers.20.self_attn.v_proj.scores', 'model.layers.21.mlp.down_proj.scores', 'model.layers.21.mlp.gate_proj.scores', 'model.layers.21.mlp.up_proj.scores', 'model.layers.21.self_attn.k_proj.scores', 'model.layers.21.self_attn.o_proj.scores', 'model.layers.21.self_attn.q_proj.scores', 'model.layers.21.self_attn.v_proj.scores', 'model.layers.22.mlp.down_proj.scores', 'model.layers.22.mlp.gate_proj.scores', 'model.layers.22.mlp.up_proj.scores', 'model.layers.22.self_attn.k_proj.scores', 'model.layers.22.self_attn.o_proj.scores', 'model.layers.22.self_attn.q_proj.scores', 'model.layers.22.self_attn.v_proj.scores', 'model.layers.23.mlp.down_proj.scores', 'model.layers.23.mlp.gate_proj.scores', 'model.layers.23.mlp.up_proj.scores', 'model.layers.23.self_attn.k_proj.scores', 'model.layers.23.self_attn.o_proj.scores', 'model.layers.23.self_attn.q_proj.scores', 'model.layers.23.self_attn.v_proj.scores', 'model.layers.3.mlp.down_proj.scores', 'model.layers.3.mlp.gate_proj.scores', 'model.layers.3.mlp.up_proj.scores', 'model.layers.3.self_attn.k_proj.scores', 'model.layers.3.self_attn.o_proj.scores', 'model.layers.3.self_attn.q_proj.scores', 'model.layers.3.self_attn.v_proj.scores', 'model.layers.4.mlp.down_proj.scores', 'model.layers.4.mlp.gate_proj.scores', 'model.layers.4.mlp.up_proj.scores', 'model.layers.4.self_attn.k_proj.scores', 'model.layers.4.self_attn.o_proj.scores', 'model.layers.4.self_attn.q_proj.scores', 'model.layers.4.self_attn.v_proj.scores', 'model.layers.5.mlp.down_proj.scores', 'model.layers.5.mlp.gate_proj.scores', 'model.layers.5.mlp.up_proj.scores', 'model.layers.5.self_attn.k_proj.scores', 'model.layers.5.self_attn.o_proj.scores', 'model.layers.5.self_attn.q_proj.scores', 'model.layers.5.self_attn.v_proj.scores', 'model.layers.6.mlp.down_proj.scores', 'model.layers.6.mlp.gate_proj.scores', 'model.layers.6.mlp.up_proj.scores', 'model.layers.6.self_attn.k_proj.scores', 'model.layers.6.self_attn.o_proj.scores', 
'model.layers.6.self_attn.q_proj.scores', 'model.layers.6.self_attn.v_proj.scores', 'model.layers.7.mlp.down_proj.scores', 'model.layers.7.mlp.gate_proj.scores', 'model.layers.7.mlp.up_proj.scores', 'model.layers.7.self_attn.k_proj.scores', 'model.layers.7.self_attn.o_proj.scores', 'model.layers.7.self_attn.q_proj.scores', 'model.layers.7.self_attn.v_proj.scores', 'model.layers.8.mlp.down_proj.scores', 'model.layers.8.mlp.gate_proj.scores', 'model.layers.8.mlp.up_proj.scores', 'model.layers.8.self_attn.k_proj.scores', 'model.layers.8.self_attn.o_proj.scores', 'model.layers.8.self_attn.q_proj.scores', 'model.layers.8.self_attn.v_proj.scores', 'model.layers.9.mlp.down_proj.scores', 'model.layers.9.mlp.gate_proj.scores', 'model.layers.9.mlp.up_proj.scores', 'model.layers.9.self_attn.k_proj.scores', 'model.layers.9.self_attn.o_proj.scores', 'model.layers.9.self_attn.q_proj.scores', 'model.layers.9.self_attn.v_proj.scores'] +You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +loading language model from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/language_model +[2025-10-09 10:23:24,579] [INFO] [partition_parameters.py:343:__exit__] finished initializing model - num_params = 907, num_elems = 1.42B +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading vision tower from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/vision_tower +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... +Loading connector from /nfs/ywang29/TinyLLaVA/checkpoints/tiny-llava-Qwen2.5-0.5B-siglip-so400m-patch14-384-pretrain/connector/pytorch_model.bin... 
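(Editor's annotation, not log output.) The module dump below shows every linear projection in the language model and in the connector replaced by a `SupermaskLinearSparsity_SoftForward_Normal` layer, and the `Pre-training init ... Mean=3.000000` lines that follow show each such layer carrying a learnable per-weight `scores` tensor. A minimal sketch of what a soft-forward masked linear of this kind typically computes, assuming the common supermask formulation (sigmoid of scores over a temperature, multiplied elementwise into frozen pretrained weights); the class name and the temperature default are illustrative assumptions, and only the 3.0 score init is taken from this log:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SoftMaskedLinear(nn.Linear):
    """Hypothetical sketch of a soft-forward supermask linear layer."""

    def __init__(self, in_features, out_features, bias=True,
                 init_mean=3.0, temperature=0.1):
        super().__init__(in_features, out_features, bias=bias)
        # init_mean=3.0 mirrors the "Mean=3.000000" init lines in this log;
        # the temperature default is an assumption, not read from the log.
        self.temperature = temperature
        self.scores = nn.Parameter(torch.full_like(self.weight, init_mean))
        # Only the scores are trained; the pretrained weight/bias stay frozen,
        # matching the trainable-parameter listing below (all ".scores").
        self.weight.requires_grad_(False)
        if self.bias is not None:
            self.bias.requires_grad_(False)

    def forward(self, x):
        # With scores near 3 and a sub-1 temperature, the sigmoid saturates
        # near 1, so training starts from an almost unmasked network.
        mask = torch.sigmoid(self.scores / self.temperature)
        return F.linear(x, self.weight * mask, self.bias)
```

Under this reading, the "masktune" run learns a soft pruning mask over the frozen Qwen2.5 weights and the connector rather than updating those weights directly.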
+TinyLlavaForConditionalGeneration( + (language_model): Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 896) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2FlashAttention2( + (q_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + (k_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (v_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=128, bias=True) + (o_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (up_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=4864, bias=False) + (down_proj): SupermaskLinearSparsity_SoftForward_Normal(in_features=4864, out_features=896, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=896, out_features=151936, bias=False) + ) + (vision_tower): SIGLIPVisionTower( + (_vision_tower): SiglipVisionModel( + (vision_model): SiglipVisionTransformer( + (embeddings): SiglipVisionEmbeddings( + (patch_embedding): Conv2d(3, 1152, kernel_size=(14, 14), stride=(14, 14), padding=valid) + (position_embedding): Embedding(729, 1152) + ) + (encoder): SiglipEncoder( + (layers): ModuleList( + (0-26): 27 x SiglipEncoderLayer( + (self_attn): SiglipAttention( + (k_proj): Linear(in_features=1152, out_features=1152, bias=True) + (v_proj): Linear(in_features=1152, out_features=1152, bias=True) + (q_proj): Linear(in_features=1152, out_features=1152, bias=True) + (out_proj): Linear(in_features=1152, out_features=1152, bias=True) + ) + (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + ) + ) + ) + (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True) + (head): SiglipMultiheadAttentionPoolingHead( + (attention): MultiheadAttention( + (out_proj): NonDynamicallyQuantizableLinear(in_features=1152, out_features=1152, bias=True) + ) + (layernorm): LayerNorm((1152,), eps=1e-06, 
elementwise_affine=True) + (mlp): SiglipMLP( + (activation_fn): PytorchGELUTanh() + (fc1): Linear(in_features=1152, out_features=4304, bias=True) + (fc2): Linear(in_features=4304, out_features=1152, bias=True) + ) + ) + ) + ) + ) + (connector): MLPConnector( + (_connector): Sequential( + (0): SupermaskLinearSparsity_SoftForward_Normal(in_features=1152, out_features=896, bias=True) + (1): GELU(approximate='none') + (2): SupermaskLinearSparsity_SoftForward_Normal(in_features=896, out_features=896, bias=True) + ) + ) +) +Pre-training init language_model.model.layers.0.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.0.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.1.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.2.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.3.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.k_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.4.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.5.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.6.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.7.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.8.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.9.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.q_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.10.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.11.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.12.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.13.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.14.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.15.mlp.up_proj.scores: Mean=3.000000 +Pre-training init 
language_model.model.layers.15.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.16.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.17.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.18.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.19.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.20.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.gate_proj.scores: 
Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.21.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.22.mlp.down_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.q_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.k_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.v_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.self_attn.o_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.gate_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.up_proj.scores: Mean=3.000000 +Pre-training init language_model.model.layers.23.mlp.down_proj.scores: Mean=3.000000 +Pre-training init connector._connector.0.scores: Mean=3.000005 +Pre-training init connector._connector.2.scores: Mean=2.999970 +Randomly sampled 66529 training samples (10.0% of 665298 total samples) +2025-10-09 10:23:37,616 | INFO: Total Parameters: 1283756736, Total Trainable Parameters: 359661568 +2025-10-09 10:23:37,621 | INFO: Trainable Parameters: +language_model.model.layers.0.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.0.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.0.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.0.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.0.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.1.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.1.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.1.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.1.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.1.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.2.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.2.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.2.self_attn.v_proj.scores: 114688 parameters 
+language_model.model.layers.2.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.2.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.2.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.3.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.3.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.3.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.3.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.3.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.4.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.4.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.4.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.4.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.4.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.5.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.5.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.5.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.5.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.5.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.6.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.6.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.6.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.6.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.6.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.7.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.7.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.7.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.7.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.7.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.8.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.8.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.8.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.8.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.8.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.9.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.9.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.9.self_attn.v_proj.scores: 
114688 parameters +language_model.model.layers.9.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.9.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.9.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.10.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.10.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.10.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.10.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.10.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.11.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.11.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.11.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.11.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.11.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.12.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.12.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.12.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.12.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.12.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.13.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.13.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.13.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.13.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.13.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.14.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.14.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.14.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.14.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.14.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.15.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.15.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.15.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.15.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.15.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.16.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.16.self_attn.k_proj.scores: 114688 
parameters +language_model.model.layers.16.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.16.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.16.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.16.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.17.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.17.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.17.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.17.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.17.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.18.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.18.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.18.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.18.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.18.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.19.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.19.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.19.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.19.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.19.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.20.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.20.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.20.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.20.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.20.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.21.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.21.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.21.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.21.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.21.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.22.self_attn.q_proj.scores: 802816 parameters +language_model.model.layers.22.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.22.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.22.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.22.mlp.down_proj.scores: 4358144 parameters +language_model.model.layers.23.self_attn.q_proj.scores: 802816 parameters 
+language_model.model.layers.23.self_attn.k_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.v_proj.scores: 114688 parameters +language_model.model.layers.23.self_attn.o_proj.scores: 802816 parameters +language_model.model.layers.23.mlp.gate_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.up_proj.scores: 4358144 parameters +language_model.model.layers.23.mlp.down_proj.scores: 4358144 parameters +connector._connector.0.scores: 1032192 parameters +connector._connector.2.scores: 802816 parameters +Parameter Offload: Total persistent parameters: 486464 in 403 params + 0%| | 0/520 [00:00<?, ?it/s] +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Trees [0] 2/-1/-1->1->0 [1] 2/-1/-1->1->0 [2] 2/-1/-1->1->0 [3] 2/-1/-1->1->0 [4] 2/-1/-1->1->0 [5] 2/-1/-1->1->0 [6] 2/-1/-1->1->0 [7] 2/-1/-1->1->0 [8] 2/-1/-1->1->0 [9] 2/-1/-1->1->0 [10] 2/-1/-1->1->0 [11] 2/-1/-1->1->0 [12] 2/-1/-1->1->0 [13] 2/-1/-1->1->0 [14] 2/-1/-1->1->0 [15] 2/-1/-1->1->0 [16] 2/-1/-1->1->0 [17] 2/-1/-1->1->0 [18] 2/-1/-1->1->0 [19] 2/-1/-1->1->0 [20] 2/-1/-1->1->0 [21] 2/-1/-1->1->0 [22] 2/-1/-1->1->0 [23] 2/-1/-1->1->0 +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO comm 0x7f318806aa70 rank 2 nRanks 8 nNodes 1 localRanks 8 localRank 2 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO comm 0x7f15cc06a8b0 rank 7 nRanks 8 nNodes 1 localRanks 8 localRank 7 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO comm 0x7fbd1c06acf0 rank 3 nRanks 8 nNodes 1 localRanks 8 localRank 3 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO comm 0x7f071806aa00 rank 0 nRanks 8 nNodes 1 localRanks 8 localRank 0 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 00/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 01/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 02/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 03/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 04/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Trees [0] 3/-1/-1->2->1 [1] 3/-1/-1->2->1 [2] 3/-1/-1->2->1 [3] 3/-1/-1->2->1 [4] 3/-1/-1->2->1 [5] 3/-1/-1->2->1 [6] 3/-1/-1->2->1 [7] 3/-1/-1->2->1 [8] 3/-1/-1->2->1 [9] 3/-1/-1->2->1 [10] 3/-1/-1->2->1 [11] 3/-1/-1->2->1 [12] 3/-1/-1->2->1 [13] 3/-1/-1->2->1 [14] 3/-1/-1->2->1 [15] 3/-1/-1->2->1 [16] 3/-1/-1->2->1 [17] 3/-1/-1->2->1 [18] 3/-1/-1->2->1 [19] 3/-1/-1->2->1 [20] 3/-1/-1->2->1 [21] 3/-1/-1->2->1 [22] 3/-1/-1->2->1 [23] 3/-1/-1->2->1 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 05/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO comm 0x7f041c06aca0 rank 4 nRanks 8 nNodes 1 localRanks 8 localRank 4 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO comm 0x7f4aac06b0f0 rank 5 nRanks 8 nNodes 1 localRanks 8 localRank 5 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO comm 0x7f966c06adc0 rank 6 nRanks 8 nNodes 1 localRanks 8 localRank 6 MNNVL 0 +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 06/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 07/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Trees [0] -1/-1/-1->7->6 [1] -1/-1/-1->7->6 [2] 
-1/-1/-1->7->6 [3] -1/-1/-1->7->6 [4] -1/-1/-1->7->6 [5] -1/-1/-1->7->6 [6] -1/-1/-1->7->6 [7] -1/-1/-1->7->6 [8] -1/-1/-1->7->6 [9] -1/-1/-1->7->6 [10] -1/-1/-1->7->6 [11] -1/-1/-1->7->6 [12] -1/-1/-1->7->6 [13] -1/-1/-1->7->6 [14] -1/-1/-1->7->6 [15] -1/-1/-1->7->6 [16] -1/-1/-1->7->6 [17] -1/-1/-1->7->6 [18] -1/-1/-1->7->6 [19] -1/-1/-1->7->6 [20] -1/-1/-1->7->6 [21] -1/-1/-1->7->6 [22] -1/-1/-1->7->6 [23] -1/-1/-1->7->6 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 08/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Trees [0] 4/-1/-1->3->2 [1] 4/-1/-1->3->2 [2] 4/-1/-1->3->2 [3] 4/-1/-1->3->2 [4] 4/-1/-1->3->2 [5] 4/-1/-1->3->2 [6] 4/-1/-1->3->2 [7] 4/-1/-1->3->2 [8] 4/-1/-1->3->2 [9] 4/-1/-1->3->2 [10] 4/-1/-1->3->2 [11] 4/-1/-1->3->2 [12] 4/-1/-1->3->2 [13] 4/-1/-1->3->2 [14] 4/-1/-1->3->2 [15] 4/-1/-1->3->2 [16] 4/-1/-1->3->2 [17] 4/-1/-1->3->2 [18] 4/-1/-1->3->2 [19] 4/-1/-1->3->2 [20] 4/-1/-1->3->2 [21] 4/-1/-1->3->2 [22] 4/-1/-1->3->2 [23] 4/-1/-1->3->2 +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 09/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 10/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 11/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 12/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 13/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 14/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 15/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 16/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 17/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 18/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Trees [0] 7/-1/-1->6->5 [1] 7/-1/-1->6->5 [2] 7/-1/-1->6->5 [3] 7/-1/-1->6->5 [4] 7/-1/-1->6->5 [5] 7/-1/-1->6->5 [6] 7/-1/-1->6->5 [7] 7/-1/-1->6->5 [8] 7/-1/-1->6->5 [9] 7/-1/-1->6->5 [10] 7/-1/-1->6->5 [11] 7/-1/-1->6->5 [12] 7/-1/-1->6->5 [13] 7/-1/-1->6->5 [14] 7/-1/-1->6->5 [15] 7/-1/-1->6->5 [16] 7/-1/-1->6->5 [17] 7/-1/-1->6->5 [18] 7/-1/-1->6->5 [19] 7/-1/-1->6->5 [20] 7/-1/-1->6->5 [21] 7/-1/-1->6->5 [22] 7/-1/-1->6->5 [23] 7/-1/-1->6->5 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 19/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 20/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 21/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 22/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 23/24 : 0 1 2 3 4 5 6 7 +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Trees [0] 6/-1/-1->5->4 [1] 6/-1/-1->5->4 [2] 6/-1/-1->5->4 [3] 6/-1/-1->5->4 [4] 6/-1/-1->5->4 [5] 6/-1/-1->5->4 [6] 6/-1/-1->5->4 [7] 6/-1/-1->5->4 [8] 6/-1/-1->5->4 [9] 6/-1/-1->5->4 [10] 6/-1/-1->5->4 [11] 6/-1/-1->5->4 [12] 6/-1/-1->5->4 [13] 6/-1/-1->5->4 [14] 6/-1/-1->5->4 [15] 6/-1/-1->5->4 [16] 6/-1/-1->5->4 [17] 
6/-1/-1->5->4 [18] 6/-1/-1->5->4 [19] 6/-1/-1->5->4 [20] 6/-1/-1->5->4 [21] 6/-1/-1->5->4 [22] 6/-1/-1->5->4 [23] 6/-1/-1->5->4 +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Trees [0] 5/-1/-1->4->3 [1] 5/-1/-1->4->3 [2] 5/-1/-1->4->3 [3] 5/-1/-1->4->3 [4] 5/-1/-1->4->3 [5] 5/-1/-1->4->3 [6] 5/-1/-1->4->3 [7] 5/-1/-1->4->3 [8] 5/-1/-1->4->3 [9] 5/-1/-1->4->3 [10] 5/-1/-1->4->3 [11] 5/-1/-1->4->3 [12] 5/-1/-1->4->3 [13] 5/-1/-1->4->3 [14] 5/-1/-1->4->3 [15] 5/-1/-1->4->3 [16] 5/-1/-1->4->3 [17] 5/-1/-1->4->3 [18] 5/-1/-1->4->3 [19] 5/-1/-1->4->3 [20] 5/-1/-1->4->3 [21] 5/-1/-1->4->3 [22] 5/-1/-1->4->3 [23] 5/-1/-1->4->3 +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Trees [0] 1/-1/-1->0->-1 [1] 1/-1/-1->0->-1 [2] 1/-1/-1->0->-1 [3] 1/-1/-1->0->-1 [4] 1/-1/-1->0->-1 [5] 1/-1/-1->0->-1 [6] 1/-1/-1->0->-1 [7] 1/-1/-1->0->-1 [8] 1/-1/-1->0->-1 [9] 1/-1/-1->0->-1 [10] 1/-1/-1->0->-1 [11] 1/-1/-1->0->-1 [12] 1/-1/-1->0->-1 [13] 1/-1/-1->0->-1 [14] 1/-1/-1->0->-1 [15] 1/-1/-1->0->-1 [16] 1/-1/-1->0->-1 [17] 1/-1/-1->0->-1 [18] 1/-1/-1->0->-1 [19] 1/-1/-1->0->-1 [20] 1/-1/-1->0->-1 [21] 1/-1/-1->0->-1 [22] 1/-1/-1->0->-1 [23] 1/-1/-1->0->-1 +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO P2P Chunksize set to 524288 +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 00/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 01/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 02/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 03/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 04/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 05/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 06/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 07/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 08/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 09/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 10/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 11/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 12/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 13/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 14/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 15/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 16/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 17/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 18/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 19/0 : 1[1] -> 2[2] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 20/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 21/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 00/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 00/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 00/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 00/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 00/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 00/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 00/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 01/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 22/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 01/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 01/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 01/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 01/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 01/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 01/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 02/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 23/0 : 1[1] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 02/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 02/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 02/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 02/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 02/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 02/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 03/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 03/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 03/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 03/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 03/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 03/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 04/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 04/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL 
INFO Channel 04/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 04/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 04/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 04/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 05/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 05/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 05/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 05/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 05/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 05/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 06/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 06/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 06/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 06/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 06/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 06/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 07/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 07/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 07/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 07/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 07/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 07/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 08/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 08/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 08/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 08/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 08/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 08/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 09/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 09/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 09/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 09/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 09/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 10/0 : 2[2] -> 3[3] via P2P/CUMEM/read 
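The "P2P Chunksize set to 524288" lines above mean point-to-point traffic on these channels moves in 512 KiB chunks. A small, purely illustrative sketch of the chunk-count arithmetic (the example tensor shape is made up, not taken from this run):

```python
# Chunk-count arithmetic for the logged P2P chunk size. Illustrative only.
CHUNK_BYTES = 524288  # 512 KiB, from the "P2P Chunksize" log lines above

def num_chunks(numel: int, dtype_bytes: int = 2) -> int:
    """Chunks needed to move `numel` elements (default bf16, 2 bytes each)."""
    total_bytes = numel * dtype_bytes
    return (total_bytes + CHUNK_BYTES - 1) // CHUNK_BYTES  # ceiling division

# A hypothetical 2048 x 896 bf16 buffer needs exactly 7 chunks:
print(num_chunks(2048 * 896))  # -> 7
```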
+ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 09/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 10/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 10/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 10/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 10/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 10/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 11/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 11/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 11/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 11/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 11/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 12/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 12/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 12/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 11/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 12/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 13/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 12/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 13/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 13/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 13/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 12/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 14/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 13/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 14/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 14/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 14/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 13/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 15/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 14/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 15/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 15/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 15/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL 
INFO Channel 14/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 16/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 15/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 16/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 16/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 16/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 15/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 17/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 16/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 17/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 17/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 17/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 16/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 17/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 18/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 18/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 18/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 18/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 17/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 19/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 18/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 19/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 19/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 19/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 20/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 18/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 20/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 20/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 19/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 20/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 21/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 19/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 21/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 21/0 : 6[6] -> 7[7] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 20/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 21/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 22/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 20/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 22/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 22/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 22/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 21/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 23/0 : 7[7] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 23/0 : 6[6] -> 7[7] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Channel 23/0 : 0[0] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 23/0 : 2[2] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 21/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 22/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 22/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 23/0 : 4[4] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 23/0 : 5[5] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 03/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 04/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 05/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 06/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 07/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 08/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 09/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 10/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 11/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 12/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 13/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 14/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 15/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 16/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 17/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 18/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL 
INFO Channel 19/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 20/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 21/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 22/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 23/0 : 3[3] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 00/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Connected all rings +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 01/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 02/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 00/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 01/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 02/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 03/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 03/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 04/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 04/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 05/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 05/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 06/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 06/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 07/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 07/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 08/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 08/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 00/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 09/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 09/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 00/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 01/0 : 3[3] -> 2[2] via P2P/CUMEM/read 
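Each rank reports "Connected all rings" once its forward-direction connections are up; the lines that follow then open the reverse direction (7 -> 6, 1 -> 0, and so on). With 24 channels per communicator, every directed neighbor pair should ultimately be logged 24 times. A quick sketch for checking that against a saved copy of this log; the file name is a placeholder:

```python
# Count "Channel xx/0 : a[a] -> b[b] via P2P/..." lines per directed GPU pair.
# Every pair should accumulate all 24 channels reported by this run.
import re
from collections import defaultdict

pat = re.compile(r"Channel (\d+)/0 : (\d+)\[\d+\] -> (\d+)\[\d+\] via P2P")
channels = defaultdict(set)

with open("train.log") as f:  # placeholder path for a saved copy of this log
    for line in f:
        for ch, src, dst in pat.findall(line):
            channels[(int(src), int(dst))].add(int(ch))

for (src, dst), chans in sorted(channels.items()):
    status = "ok" if len(chans) == 24 else f"only {len(chans)} channels"
    print(f"{src} -> {dst}: {status}")
```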
+ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 10/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 10/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 01/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 02/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 11/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 11/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 02/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 00/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 12/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 03/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 01/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 04/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 13/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 02/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 05/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 03/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 14/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 06/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 04/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 03/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 15/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 07/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 05/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 04/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 16/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 08/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 06/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 05/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 17/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 09/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 07/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 06/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 18/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL 
INFO Channel 10/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 00/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 08/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 07/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 11/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 01/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 19/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 09/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 08/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 12/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 02/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 10/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 12/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 20/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 09/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 13/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 03/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 11/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 10/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 13/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 21/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 14/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 04/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 12/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 11/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 14/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 15/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 22/0 : 1[1] -> 0[0] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 05/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 13/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 12/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 15/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 16/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Channel 23/0 : 1[1] -> 0[0] via P2P/CUMEM/read 
+ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 06/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 14/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 13/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 16/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 17/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 07/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 15/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 14/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 17/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 18/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 08/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 16/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 15/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 18/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 19/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 09/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 17/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 16/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 19/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 20/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 18/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 21/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 10/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 19/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 11/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 20/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 20/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 12/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 21/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 21/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 13/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 22/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 22/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL 
INFO Channel 17/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 14/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Channel 23/0 : 2[2] -> 1[1] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Channel 23/0 : 7[7] -> 6[6] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 18/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 15/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 19/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 16/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 20/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 17/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 22/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 18/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Channel 23/0 : 4[4] -> 3[3] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 21/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 19/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 22/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 00/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Channel 23/0 : 3[3] -> 2[2] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 20/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 01/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 21/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 02/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 22/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Channel 23/0 : 6[6] -> 5[5] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 03/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 04/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 05/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 06/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 07/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 08/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 09/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 10/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 11/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 12/0 : 5[5] -> 4[4] via P2P/CUMEM/read 
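The "Connected all trees" and "ncclCommInitRank ... Init COMPLETE" messages just below mark the end of communicator setup for this 8-rank world. As a rough orientation, here is a minimal sketch of the PyTorch-side calls that drive such an initialization, with a smoke-test all-reduce; the actual run goes through the DeepSpeed launcher rather than a standalone script like this:

```python
# Minimal PyTorch bring-up whose NCCL init these log lines would trace.
# Sketch only: the real job is launched by DeepSpeed, not by this script.
import os
import torch
import torch.distributed as dist

def main() -> None:
    # The launcher sets RANK, LOCAL_RANK, and WORLD_SIZE for each process.
    local_rank = int(os.environ["LOCAL_RANK"])
    torch.cuda.set_device(local_rank)
    dist.init_process_group(backend="nccl")  # drives ncclCommInitRank

    # Smoke-test collective: after "Init COMPLETE", every rank should print
    # the world size (8.0 for this run).
    x = torch.ones(1, device="cuda")
    dist.all_reduce(x)
    print(f"rank {dist.get_rank()}: all_reduce -> {x.item()}")

    dist.destroy_process_group()

if __name__ == "__main__":
    main()
```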
+ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 13/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 14/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 15/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 16/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 17/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 18/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 19/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 20/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 21/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 22/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Channel 23/0 : 5[5] -> 4[4] via P2P/CUMEM/read +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO Connected all trees +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO 24 coll channels, 24 
collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO threadThresholds 8/8/64 | 64/8/64 | 512 | 512 +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO 24 coll channels, 24 collnet channels, 0 nvls channels, 32 p2p channels, 32 p2p channels per peer +ywang29-vrdb-test1-worker-0:1158268:1164803 [6] NCCL INFO ncclCommInitRank comm 0x7f966c06adc0 rank 6 nranks 8 cudaDev 6 nvmlDev 6 busId a01c0 commId 0xc816ca9acfc1fbf9 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158265:1164800 [3] NCCL INFO ncclCommInitRank comm 0x7fbd1c06acf0 rank 3 nranks 8 cudaDev 3 nvmlDev 3 busId 201d0 commId 0xc816ca9acfc1fbf9 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158263:1164799 [1] NCCL INFO ncclCommInitRank comm 0x7f3b3006b1d0 rank 1 nranks 8 cudaDev 1 nvmlDev 1 busId 101d0 commId 0xc816ca9acfc1fbf9 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158269:1164802 [7] NCCL INFO ncclCommInitRank comm 0x7f15cc06a8b0 rank 7 nranks 8 cudaDev 7 nvmlDev 7 busId a01d0 commId 0xc816ca9acfc1fbf9 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158267:1164797 [5] NCCL INFO ncclCommInitRank comm 0x7f4aac06b0f0 rank 5 nranks 8 cudaDev 5 nvmlDev 5 busId 901d0 commId 0xc816ca9acfc1fbf9 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158264:1164801 [2] NCCL INFO ncclCommInitRank comm 0x7f318806aa70 rank 2 nranks 8 cudaDev 2 nvmlDev 2 busId 201c0 commId 0xc816ca9acfc1fbf9 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158262:1164796 [0] NCCL INFO ncclCommInitRank comm 0x7f071806aa00 rank 0 nranks 8 cudaDev 0 nvmlDev 0 busId 101c0 commId 0xc816ca9acfc1fbf9 - Init COMPLETE +ywang29-vrdb-test1-worker-0:1158266:1164798 [4] NCCL INFO ncclCommInitRank comm 0x7f041c06aca0 rank 4 nranks 8 cudaDev 4 nvmlDev 4 busId 901c0 commId 0xc816ca9acfc1fbf9 - Init COMPLETE + 0%| | 1/520 [00:42<6:08:32, 42.61s/it] {'loss': 2.0453, 'grad_norm': 0.0048338740596386405, 'learning_rate': 0.05625, 'epoch': 0.0} + 0%| | 1/520 [00:42<6:08:32, 42.61s/it] 0%| | 2/520 [00:46<2:49:53, 19.68s/it] {'loss': 2.0549, 'grad_norm': 0.005248840252691204, 'learning_rate': 0.1125, 'epoch': 0.0} + 0%| | 2/520 [00:46<2:49:53, 19.68s/it] 1%| | 3/520 [00:49<1:46:19, 12.34s/it] {'loss': 2.1899, 'grad_norm': 0.006005492894220033, 'learning_rate': 0.16875, 'epoch': 0.01} + 1%| | 3/520 [00:49<1:46:19, 12.34s/it] 1%| | 4/520 [00:53<1:16:33, 8.90s/it] {'loss': 1.667, 'grad_norm': 0.0014762144250883651, 'learning_rate': 0.225, 'epoch': 0.01} + 1%| | 4/520 [00:53<1:16:33, 8.90s/it] 1%| | 5/520 [00:57<1:00:14, 7.02s/it] {'loss': 1.654, 'grad_norm': 0.0007706540310339228, 'learning_rate': 0.28125, 'epoch': 0.01} + 1%| | 5/520 [00:57<1:00:14, 7.02s/it] 1%| | 6/520 [01:00<50:22, 5.88s/it] {'loss': 1.3715, 'grad_norm': 0.0005455520843680223, 'learning_rate': 0.3375, 'epoch': 0.01} + 1%| | 6/520 [01:00<50:22, 5.88s/it] 1%|▏ | 7/520 [01:04<44:03, 5.15s/it] {'loss': 1.4191, 'grad_norm': 0.000723042651987352, 'learning_rate': 0.39375, 'epoch': 0.01} + 1%|▏ | 7/520 [01:04<44:03, 5.15s/it] 2%|▏ | 8/520 [01:08<41:31, 4.87s/it] {'loss': 1.4557, 'grad_norm': 0.0009349160093779789, 'learning_rate': 0.45, 'epoch': 0.02} + 2%|▏ | 8/520 [01:08<41:31, 4.87s/it] 2%|▏ | 9/520 [01:12<39:42, 4.66s/it] {'loss': 1.5262, 'grad_norm': 0.0014120466481671715, 'learning_rate': 0.50625, 'epoch': 0.02} + 2%|▏ | 9/520 [01:12<39:42, 4.66s/it] 2%|▏ | 10/520 [01:16<36:55, 4.34s/it] {'loss': 1.3603, 'grad_norm': 0.002176733293708323, 'learning_rate': 0.5625, 'epoch': 0.02} + 2%|▏ | 10/520 [01:16<36:55, 4.34s/it] 2%|▏ | 11/520 
[01:20<36:52, 4.35s/it] {'loss': 1.5265, 'grad_norm': 0.0038208979497463057, 'learning_rate': 0.61875, 'epoch': 0.02} + 2%|▏ | 11/520 [01:20<36:52, 4.35s/it] 2%|▏ | 12/520 [01:24<35:01, 4.14s/it] {'loss': 1.5003, 'grad_norm': 0.005594143144721575, 'learning_rate': 0.675, 'epoch': 0.02} + 2%|▏ | 12/520 [01:24<35:01, 4.14s/it][2025-10-09 10:25:11,381] [WARNING] [stage3.py:2069:step] 1 pytorch allocator cache flushes since last step. this happens when there is high memory pressure and is detrimental to performance. if this is happening frequently consider adjusting settings to reduce memory consumption. If you are unable to make the cache flushes go away consider adding get_accelerator().empty_cache() calls in your training loop to ensure that all ranks flush their caches at the same time + 2%|▎ | 13/520 [01:28<35:26, 4.19s/it] {'loss': 1.923, 'grad_norm': 0.012093784976612615, 'learning_rate': 0.7312500000000001, 'epoch': 0.03} + 2%|▎ | 13/520 [01:28<35:26, 4.19s/it] 3%|▎ | 14/520 [01:32<34:10, 4.05s/it] {'loss': 2.1011, 'grad_norm': 0.027287500507276095, 'learning_rate': 0.7875, 'epoch': 0.03} + 3%|▎ | 14/520 [01:32<34:10, 4.05s/it] 3%|▎ | 15/520 [01:36<33:33, 3.99s/it] {'loss': 1.9758, 'grad_norm': 0.018054063215176417, 'learning_rate': 0.84375, 'epoch': 0.03} + 3%|▎ | 15/520 [01:36<33:33, 3.99s/it] 3%|▎ | 16/520 [01:40<32:50, 3.91s/it] {'loss': 2.3987, 'grad_norm': 0.10427969798915696, 'learning_rate': 0.9, 'epoch': 0.03} + 3%|▎ | 16/520 [01:40<32:50, 3.91s/it] 3%|▎ | 17/520 [01:43<32:07, 3.83s/it] {'loss': 3.0826, 'grad_norm': 0.13221303825198766, 'learning_rate': 0.8999912578191922, 'epoch': 0.03} + 3%|▎ | 17/520 [01:43<32:07, 3.83s/it] 3%|▎ | 18/520 [01:47<31:40, 3.79s/it] {'loss': 7.0013, 'grad_norm': 0.5477871018671899, 'learning_rate': 0.8999650316164387, 'epoch': 0.03} + 3%|▎ | 18/520 [01:47<31:40, 3.79s/it] 4%|▎ | 19/520 [01:51<31:30, 3.77s/it] {'loss': 10.7358, 'grad_norm': 0.3909664621097939, 'learning_rate': 0.8999213224107359, 'epoch': 0.04} + 4%|▎ | 19/520 [01:51<31:30, 3.77s/it] 4%|▍ | 20/520 [01:54<31:10, 3.74s/it] {'loss': 8.9989, 'grad_norm': 0.16399589918200647, 'learning_rate': 0.8998601319003673, 'epoch': 0.04} + 4%|▍ | 20/520 [01:54<31:10, 3.74s/it] 4%|▍ | 21/520 [01:58<30:57, 3.72s/it] {'loss': 13.4042, 'grad_norm': 0.2398931404804199, 'learning_rate': 0.8997814624628374, 'epoch': 0.04} + 4%|▍ | 21/520 [01:58<30:57, 3.72s/it] 4%|▍ | 22/520 [02:02<30:45, 3.71s/it] {'loss': 13.1877, 'grad_norm': 0.2163069753897003, 'learning_rate': 0.8996853171547794, 'epoch': 0.04} + 4%|▍ | 22/520 [02:02<30:45, 3.71s/it] 4%|▍ | 23/520 [02:06<30:45, 3.71s/it] {'loss': 13.204, 'grad_norm': 0.19564500082929662, 'learning_rate': 0.899571699711836, 'epoch': 0.04} + 4%|▍ | 23/520 [02:06<30:45, 3.71s/it] 5%|▍ | 24/520 [02:09<30:27, 3.68s/it] {'loss': 7.8761, 'grad_norm': 0.04180234471616734, 'learning_rate': 0.899440614548515, 'epoch': 0.05} + 5%|▍ | 24/520 [02:09<30:27, 3.68s/it] 5%|▍ | 25/520 [02:13<30:27, 3.69s/it] {'loss': 7.4545, 'grad_norm': 0.025642718417424555, 'learning_rate': 0.8992920667580175, 'epoch': 0.05} + 5%|▍ | 25/520 [02:13<30:27, 3.69s/it] 5%|▌ | 26/520 [02:17<30:27, 3.70s/it] {'loss': 6.7496, 'grad_norm': 0.015703656673161287, 'learning_rate': 0.8991260621120394, 'epoch': 0.05} + 5%|▌ | 26/520 [02:17<30:27, 3.70s/it] 5%|▌ | 27/520 [02:20<30:27, 3.71s/it] {'loss': 6.0479, 'grad_norm': 0.019077938287565762, 'learning_rate': 0.898942607060548, 'epoch': 0.05} + 5%|▌ | 27/520 [02:20<30:27, 3.71s/it] 5%|▌ | 28/520 [02:24<30:26, 3.71s/it] {'loss': 5.7078, 'grad_norm': 
0.024111906408078865, 'learning_rate': 0.898741708731531, 'epoch': 0.05} + 5%|▌ | 28/520 [02:24<30:26, 3.71s/it] 6%|▌ | 29/520 [02:28<30:21, 3.71s/it] {'loss': 5.6627, 'grad_norm': 0.025255410317398884, 'learning_rate': 0.8985233749307198, 'epoch': 0.06} + 6%|▌ | 29/520 [02:28<30:21, 3.71s/it] 6%|▌ | 30/520 [02:31<30:17, 3.71s/it] {'loss': 6.4683, 'grad_norm': 0.03077555671252308, 'learning_rate': 0.8982876141412856, 'epoch': 0.06} + 6%|▌ | 30/520 [02:31<30:17, 3.71s/it] 6%|▌ | 31/520 [02:35<30:16, 3.71s/it] {'loss': 5.0971, 'grad_norm': 0.015108845787871458, 'learning_rate': 0.8980344355235101, 'epoch': 0.06} + 6%|▌ | 31/520 [02:35<30:16, 3.71s/it] 6%|▌ | 32/520 [02:39<30:04, 3.70s/it] {'loss': 7.0923, 'grad_norm': 0.008797139783594277, 'learning_rate': 0.8977638489144307, 'epoch': 0.06} + 6%|▌ | 32/520 [02:39<30:04, 3.70s/it] 6%|▋ | 33/520 [02:43<30:03, 3.70s/it] {'loss': 5.2685, 'grad_norm': 0.005230128235979267, 'learning_rate': 0.8974758648274559, 'epoch': 0.06} + 6%|▋ | 33/520 [02:43<30:03, 3.70s/it] 7%|▋ | 34/520 [02:46<30:01, 3.71s/it] {'loss': 4.9304, 'grad_norm': 0.008229691490971303, 'learning_rate': 0.8971704944519593, 'epoch': 0.07} + 7%|▋ | 34/520 [02:46<30:01, 3.71s/it] 7%|▋ | 35/520 [02:50<29:55, 3.70s/it] {'loss': 5.0536, 'grad_norm': 0.006783232119001029, 'learning_rate': 0.8968477496528428, 'epoch': 0.07} + 7%|▋ | 35/520 [02:50<29:55, 3.70s/it] 7%|▋ | 36/520 [02:54<29:50, 3.70s/it] {'loss': 5.3002, 'grad_norm': 0.006108912602667358, 'learning_rate': 0.8965076429700775, 'epoch': 0.07} + 7%|▋ | 36/520 [02:54<29:50, 3.70s/it] 7%|▋ | 37/520 [02:57<30:08, 3.74s/it] {'loss': 5.7149, 'grad_norm': 0.007347941863955286, 'learning_rate': 0.8961501876182147, 'epoch': 0.07} + 7%|▋ | 37/520 [02:57<30:08, 3.74s/it] 7%|▋ | 38/520 [03:01<30:23, 3.78s/it] {'loss': 5.1978, 'grad_norm': 0.003936825027957106, 'learning_rate': 0.8957753974858738, 'epoch': 0.07} + 7%|▋ | 38/520 [03:01<30:23, 3.78s/it] 8%|▊ | 39/520 [03:05<30:21, 3.79s/it] {'loss': 4.719, 'grad_norm': 0.009016900570419777, 'learning_rate': 0.8953832871352018, 'epoch': 0.07} + 8%|▊ | 39/520 [03:05<30:21, 3.79s/it] 8%|▊ | 40/520 [03:09<30:00, 3.75s/it] {'loss': 4.5838, 'grad_norm': 0.009638311205601983, 'learning_rate': 0.8949738718013078, 'epoch': 0.08} + 8%|▊ | 40/520 [03:09<30:00, 3.75s/it] 8%|▊ | 41/520 [03:13<29:57, 3.75s/it] {'loss': 4.7221, 'grad_norm': 0.004002081701981651, 'learning_rate': 0.8945471673916716, 'epoch': 0.08} + 8%|▊ | 41/520 [03:13<29:57, 3.75s/it] 8%|▊ | 42/520 [03:16<29:45, 3.74s/it] {'loss': 5.0335, 'grad_norm': 0.006212653730867397, 'learning_rate': 0.8941031904855246, 'epoch': 0.08} + 8%|▊ | 42/520 [03:16<29:45, 3.74s/it] 8%|▊ | 43/520 [03:20<29:38, 3.73s/it] {'loss': 5.5813, 'grad_norm': 0.007504020230574295, 'learning_rate': 0.8936419583332061, 'epoch': 0.08} + 8%|▊ | 43/520 [03:20<29:38, 3.73s/it] 8%|▊ | 44/520 [03:24<29:27, 3.71s/it] {'loss': 5.6433, 'grad_norm': 0.003280761289096008, 'learning_rate': 0.8931634888554937, 'epoch': 0.08} + 8%|▊ | 44/520 [03:24<29:27, 3.71s/it] 9%|▊ | 45/520 [03:27<29:24, 3.71s/it] {'loss': 4.3228, 'grad_norm': 0.0018242219591213368, 'learning_rate': 0.8926678006429056, 'epoch': 0.09} + 9%|▊ | 45/520 [03:27<29:24, 3.71s/it] 9%|▉ | 46/520 [03:31<29:18, 3.71s/it] {'loss': 5.8156, 'grad_norm': 0.0045912936956755725, 'learning_rate': 0.8921549129549798, 'epoch': 0.09} + 9%|▉ | 46/520 [03:31<29:18, 3.71s/it] 9%|▉ | 47/520 [03:35<29:06, 3.69s/it] {'loss': 4.4773, 'grad_norm': 0.002570223035920976, 'learning_rate': 0.8916248457195245, 'epoch': 0.09} + 9%|▉ | 47/520 
[03:35<29:06, 3.69s/it] 9%|▉ | 48/520 [03:38<28:58, 3.68s/it] {'loss': 4.2519, 'grad_norm': 0.0024277351302351, 'learning_rate': 0.8910776195318447, 'epoch': 0.09} + 9%|▉ | 48/520 [03:38<28:58, 3.68s/it] 9%|▉ | 49/520 [03:42<29:02, 3.70s/it] {'loss': 4.1557, 'grad_norm': 0.006057573432180882, 'learning_rate': 0.8905132556539418, 'epoch': 0.09} + 9%|▉ | 49/520 [03:42<29:02, 3.70s/it] 10%|▉ | 50/520 [03:46<28:51, 3.68s/it] {'loss': 4.106, 'grad_norm': 0.0027010389772407902, 'learning_rate': 0.8899317760136871, 'epoch': 0.1} + 10%|▉ | 50/520 [03:46<28:51, 3.68s/it] 10%|▉ | 51/520 [03:49<28:41, 3.67s/it] {'loss': 3.85, 'grad_norm': 0.002449128442544576, 'learning_rate': 0.88933320320397, 'epoch': 0.1} + 10%|▉ | 51/520 [03:49<28:41, 3.67s/it] 10%|█ | 52/520 [03:53<28:36, 3.67s/it] {'loss': 4.2451, 'grad_norm': 0.002555737442686249, 'learning_rate': 0.8887175604818206, 'epoch': 0.1} + 10%|█ | 52/520 [03:53<28:36, 3.67s/it] 10%|█ | 53/520 [03:57<28:55, 3.72s/it] {'loss': 4.1612, 'grad_norm': 0.0029438894985391057, 'learning_rate': 0.8880848717675055, 'epoch': 0.1} + 10%|█ | 53/520 [03:57<28:55, 3.72s/it] 10%|█ | 54/520 [04:01<29:14, 3.76s/it] {'loss': 3.6592, 'grad_norm': 0.0026755088163453963, 'learning_rate': 0.8874351616435986, 'epoch': 0.1} + 10%|█ | 54/520 [04:01<29:14, 3.76s/it] 11%|█ | 55/520 [04:05<29:17, 3.78s/it] {'loss': 3.7626, 'grad_norm': 0.003129020879273414, 'learning_rate': 0.8867684553540259, 'epoch': 0.11} + 11%|█ | 55/520 [04:05<29:17, 3.78s/it] 11%|█ | 56/520 [04:08<29:21, 3.80s/it] {'loss': 3.8513, 'grad_norm': 0.003795396060001577, 'learning_rate': 0.8860847788030851, 'epoch': 0.11} + 11%|█ | 56/520 [04:08<29:21, 3.80s/it] 11%|█ | 57/520 [04:12<29:18, 3.80s/it] {'loss': 3.6924, 'grad_norm': 0.008594337632612688, 'learning_rate': 0.8853841585544384, 'epoch': 0.11} + 11%|█ | 57/520 [04:12<29:18, 3.80s/it] 11%|█ | 58/520 [04:16<28:53, 3.75s/it] {'loss': 3.6566, 'grad_norm': 0.0076319752368770866, 'learning_rate': 0.8846666218300807, 'epoch': 0.11} + 11%|█ | 58/520 [04:16<28:53, 3.75s/it] 11%|█▏ | 59/520 [04:20<28:44, 3.74s/it] {'loss': 4.3147, 'grad_norm': 0.003607872716480238, 'learning_rate': 0.8839321965092826, 'epoch': 0.11} + 11%|█▏ | 59/520 [04:20<28:44, 3.74s/it] 12%|█▏ | 60/520 [04:23<28:39, 3.74s/it] {'loss': 3.5532, 'grad_norm': 0.006550302053420642, 'learning_rate': 0.8831809111275054, 'epoch': 0.12} + 12%|█▏ | 60/520 [04:23<28:39, 3.74s/it] 12%|█▏ | 61/520 [04:27<28:32, 3.73s/it] {'loss': 4.6477, 'grad_norm': 0.009259685985665797, 'learning_rate': 0.8824127948752949, 'epoch': 0.12} + 12%|█▏ | 61/520 [04:27<28:32, 3.73s/it] 12%|█▏ | 62/520 [04:31<28:15, 3.70s/it] {'loss': 3.4046, 'grad_norm': 0.005545799724409958, 'learning_rate': 0.8816278775971446, 'epoch': 0.12} + 12%|█▏ | 62/520 [04:31<28:15, 3.70s/it] 12%|█▏ | 63/520 [04:34<28:05, 3.69s/it] {'loss': 3.2694, 'grad_norm': 0.006551111794105284, 'learning_rate': 0.8808261897903381, 'epoch': 0.12} + 12%|█▏ | 63/520 [04:34<28:05, 3.69s/it] 12%|█▏ | 64/520 [04:38<27:55, 3.67s/it] {'loss': 3.2573, 'grad_norm': 0.007190621254447662, 'learning_rate': 0.8800077626037633, 'epoch': 0.12} + 12%|█▏ | 64/520 [04:38<27:55, 3.67s/it] 12%|█▎ | 65/520 [04:42<27:57, 3.69s/it] {'loss': 3.2621, 'grad_norm': 0.003862037378476715, 'learning_rate': 0.8791726278367021, 'epoch': 0.12} + 12%|█▎ | 65/520 [04:42<27:57, 3.69s/it] 13%|█▎ | 66/520 [04:45<27:49, 3.68s/it] {'loss': 3.2079, 'grad_norm': 0.005582204860574795, 'learning_rate': 0.8783208179375951, 'epoch': 0.13} + 13%|█▎ | 66/520 [04:45<27:49, 3.68s/it] 13%|█▎ | 67/520 [04:49<27:44, 
3.67s/it] {'loss': 2.8208, 'grad_norm': 0.0029711137967415907, 'learning_rate': 0.8774523660027806, 'epoch': 0.13} + 13%|█▎ | 67/520 [04:49<27:44, 3.67s/it] 13%|█▎ | 68/520 [04:53<27:45, 3.68s/it] {'loss': 2.6891, 'grad_norm': 0.003125324646649502, 'learning_rate': 0.8765673057752094, 'epoch': 0.13} + 13%|█▎ | 68/520 [04:53<27:45, 3.68s/it] 13%|█▎ | 69/520 [04:56<27:42, 3.69s/it] {'loss': 2.7495, 'grad_norm': 0.0027596303551265427, 'learning_rate': 0.8756656716431321, 'epoch': 0.13} + 13%|█▎ | 69/520 [04:56<27:42, 3.69s/it] 13%|█▎ | 70/520 [05:00<27:38, 3.69s/it] {'loss': 2.7754, 'grad_norm': 0.003234943507539121, 'learning_rate': 0.8747474986387654, 'epoch': 0.13} + 13%|█▎ | 70/520 [05:00<27:38, 3.69s/it] 14%|█▎ | 71/520 [05:04<27:44, 3.71s/it] {'loss': 2.6296, 'grad_norm': 0.002929261556462234, 'learning_rate': 0.8738128224369286, 'epoch': 0.14} + 14%|█▎ | 71/520 [05:04<27:44, 3.71s/it] 14%|█▍ | 72/520 [05:08<27:57, 3.74s/it] {'loss': 2.7906, 'grad_norm': 0.0040224856716908515, 'learning_rate': 0.8728616793536588, 'epoch': 0.14} + 14%|█▍ | 72/520 [05:08<27:57, 3.74s/it] 14%|█▍ | 73/520 [05:12<28:08, 3.78s/it] {'loss': 2.5169, 'grad_norm': 0.004386934587392127, 'learning_rate': 0.8718941063447997, 'epoch': 0.14} + 14%|█▍ | 73/520 [05:12<28:08, 3.78s/it] 14%|█▍ | 74/520 [05:15<28:12, 3.80s/it] {'loss': 2.7124, 'grad_norm': 0.0023556283331336606, 'learning_rate': 0.8709101410045653, 'epoch': 0.14} + 14%|█▍ | 74/520 [05:15<28:12, 3.80s/it] 14%|█▍ | 75/520 [05:19<28:08, 3.79s/it] {'loss': 2.3803, 'grad_norm': 0.0013991820453828497, 'learning_rate': 0.8699098215640799, 'epoch': 0.14} + 14%|█▍ | 75/520 [05:19<28:08, 3.79s/it] 15%|█▍ | 76/520 [05:23<28:11, 3.81s/it] {'loss': 3.6296, 'grad_norm': 0.0056211431422994226, 'learning_rate': 0.8688931868898919, 'epoch': 0.15} + 15%|█▍ | 76/520 [05:23<28:11, 3.81s/it] 15%|█▍ | 77/520 [05:27<28:09, 3.81s/it] {'loss': 2.4877, 'grad_norm': 0.0024688453734347266, 'learning_rate': 0.8678602764824641, 'epoch': 0.15} + 15%|█▍ | 77/520 [05:27<28:09, 3.81s/it] 15%|█▌ | 78/520 [05:31<28:12, 3.83s/it] {'loss': 2.4907, 'grad_norm': 0.0018496589308425126, 'learning_rate': 0.8668111304746389, 'epoch': 0.15} + 15%|█▌ | 78/520 [05:31<28:12, 3.83s/it] 15%|█▌ | 79/520 [05:35<28:11, 3.84s/it] {'loss': 2.4715, 'grad_norm': 0.001962189343998098, 'learning_rate': 0.8657457896300791, 'epoch': 0.15} + 15%|█▌ | 79/520 [05:35<28:11, 3.84s/it] 15%|█▌ | 80/520 [05:38<28:09, 3.84s/it] {'loss': 3.5941, 'grad_norm': 0.008002244625811302, 'learning_rate': 0.8646642953416834, 'epoch': 0.15} + 15%|█▌ | 80/520 [05:38<28:09, 3.84s/it] 16%|█▌ | 81/520 [05:42<28:00, 3.83s/it] {'loss': 2.8382, 'grad_norm': 0.002677516391137244, 'learning_rate': 0.8635666896299793, 'epoch': 0.16} + 16%|█▌ | 81/520 [05:42<28:00, 3.83s/it] 16%|█▌ | 82/520 [05:46<27:45, 3.80s/it] {'loss': 2.5441, 'grad_norm': 0.0020627696901841437, 'learning_rate': 0.8624530151414893, 'epoch': 0.16} + 16%|█▌ | 82/520 [05:46<27:45, 3.80s/it] 16%|█▌ | 83/520 [05:50<27:24, 3.76s/it] {'loss': 2.5696, 'grad_norm': 0.0023517876318454494, 'learning_rate': 0.8613233151470745, 'epoch': 0.16} + 16%|█▌ | 83/520 [05:50<27:24, 3.76s/it] 16%|█▌ | 84/520 [05:53<27:16, 3.75s/it] {'loss': 2.5241, 'grad_norm': 0.0020175556468729782, 'learning_rate': 0.860177633540253, 'epoch': 0.16} + 16%|█▌ | 84/520 [05:53<27:16, 3.75s/it] 16%|█▋ | 85/520 [05:57<26:57, 3.72s/it] {'loss': 2.4384, 'grad_norm': 0.0018934831599413142, 'learning_rate': 0.859016014835495, 'epoch': 0.16} + 16%|█▋ | 85/520 [05:57<26:57, 3.72s/it] 17%|█▋ | 86/520 [06:01<26:45, 3.70s/it] 
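The stage3.py warning logged at step 12 above flags a pytorch allocator cache flush under memory pressure and suggests synchronized get_accelerator().empty_cache() calls in the training loop. A hedged sketch of that mitigation in a generic DeepSpeed loop; the interval, dataloader, and engine variables are placeholders, not this run's code:

```python
# Sketch of the mitigation suggested by the stage3.py warning above.
# `train_dataloader` and `model_engine` are assumed placeholders.
from deepspeed.accelerator import get_accelerator

EMPTY_CACHE_EVERY = 50  # placeholder interval; tune to the workload

for step, batch in enumerate(train_dataloader):
    loss = model_engine(batch)       # assumed to return the loss
    model_engine.backward(loss)
    model_engine.step()
    if step % EMPTY_CACHE_EVERY == 0:
        # Flush on every rank at the same point, as the warning recommends,
        # so ranks do not diverge in allocator behaviour.
        get_accelerator().empty_cache()
```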
{'loss': 2.71, 'grad_norm': 0.0017616061930115696, 'learning_rate': 0.8578385041664925, 'epoch': 0.17} + 17%|█▋ | 86/520 [06:01<26:45, 3.70s/it] 17%|█▋ | 87/520 [06:04<26:45, 3.71s/it] {'loss': 3.4798, 'grad_norm': 0.018623151446055402, 'learning_rate': 0.8566451472844065, 'epoch': 0.17} + 17%|█▋ | 87/520 [06:04<26:45, 3.71s/it] 17%|█▋ | 88/520 [06:08<26:37, 3.70s/it] {'loss': 4.3518, 'grad_norm': 0.01628749757357824, 'learning_rate': 0.8554359905560885, 'epoch': 0.17} + 17%|█▋ | 88/520 [06:08<26:37, 3.70s/it] 17%|█▋ | 89/520 [06:12<26:31, 3.69s/it] {'loss': 2.4804, 'grad_norm': 0.00310776380491732, 'learning_rate': 0.8542110809622799, 'epoch': 0.17} + 17%|█▋ | 89/520 [06:12<26:31, 3.69s/it] 17%|█▋ | 90/520 [06:15<26:32, 3.70s/it] {'loss': 2.3488, 'grad_norm': 0.00196715423235106, 'learning_rate': 0.8529704660957854, 'epoch': 0.17} + 17%|█▋ | 90/520 [06:15<26:32, 3.70s/it] 18%|█▊ | 91/520 [06:19<26:19, 3.68s/it] {'loss': 2.5072, 'grad_norm': 0.0020828955502551192, 'learning_rate': 0.8517141941596252, 'epoch': 0.17} + 18%|█▊ | 91/520 [06:19<26:19, 3.68s/it] 18%|█▊ | 92/520 [06:23<26:13, 3.68s/it] {'loss': 2.393, 'grad_norm': 0.0017365902653781622, 'learning_rate': 0.850442313965161, 'epoch': 0.18} + 18%|█▊ | 92/520 [06:23<26:13, 3.68s/it] 18%|█▊ | 93/520 [06:26<26:11, 3.68s/it] {'loss': 2.3586, 'grad_norm': 0.0016031278133598805, 'learning_rate': 0.8491548749301997, 'epoch': 0.18} + 18%|█▊ | 93/520 [06:26<26:11, 3.68s/it] 18%|█▊ | 94/520 [06:30<26:04, 3.67s/it] {'loss': 2.5721, 'grad_norm': 0.0014209763524141716, 'learning_rate': 0.8478519270770745, 'epoch': 0.18} + 18%|█▊ | 94/520 [06:30<26:04, 3.67s/it] 18%|█▊ | 95/520 [06:34<26:04, 3.68s/it] {'loss': 2.3447, 'grad_norm': 0.001498243790677533, 'learning_rate': 0.846533521030699, 'epoch': 0.18} + 18%|█▊ | 95/520 [06:34<26:04, 3.68s/it] 18%|█▊ | 96/520 [06:37<25:53, 3.66s/it] {'loss': 2.2454, 'grad_norm': 0.0013240724206766835, 'learning_rate': 0.8451997080166028, 'epoch': 0.18} + 18%|█▊ | 96/520 [06:37<25:53, 3.66s/it] 19%|█▊ | 97/520 [06:41<25:49, 3.66s/it] {'loss': 2.3374, 'grad_norm': 0.0013808521303148185, 'learning_rate': 0.8438505398589392, 'epoch': 0.19} + 19%|█▊ | 97/520 [06:41<25:49, 3.66s/it] 19%|█▉ | 98/520 [06:45<25:43, 3.66s/it] {'loss': 2.2408, 'grad_norm': 0.0014446130606449014, 'learning_rate': 0.8424860689784724, 'epoch': 0.19} + 19%|█▉ | 98/520 [06:45<25:43, 3.66s/it] 19%|█▉ | 99/520 [06:48<25:38, 3.65s/it] {'loss': 2.3389, 'grad_norm': 0.0013032768168128406, 'learning_rate': 0.8411063483905409, 'epoch': 0.19} + 19%|█▉ | 99/520 [06:48<25:38, 3.65s/it] 19%|█▉ | 100/520 [06:52<25:34, 3.65s/it] {'loss': 2.9846, 'grad_norm': 0.0027017815082330428, 'learning_rate': 0.8397114317029974, 'epoch': 0.19} + 19%|█▉ | 100/520 [06:52<25:34, 3.65s/it] 19%|█▉ | 101/520 [06:56<25:34, 3.66s/it] {'loss': 2.3203, 'grad_norm': 0.0011666069539517001, 'learning_rate': 0.8383013731141259, 'epoch': 0.19} + 19%|█▉ | 101/520 [06:56<25:34, 3.66s/it] 20%|█▉ | 102/520 [06:59<25:30, 3.66s/it] {'loss': 2.2479, 'grad_norm': 0.0011958857222550538, 'learning_rate': 0.8368762274105357, 'epoch': 0.2} + 20%|█▉ | 102/520 [06:59<25:30, 3.66s/it] 20%|█▉ | 103/520 [07:03<25:33, 3.68s/it] {'loss': 2.1475, 'grad_norm': 0.0010333987334417802, 'learning_rate': 0.8354360499650332, 'epoch': 0.2} + 20%|█▉ | 103/520 [07:03<25:33, 3.68s/it] 20%|██ | 104/520 [07:07<25:28, 3.67s/it] {'loss': 2.3045, 'grad_norm': 0.001071849247143533, 'learning_rate': 0.8339808967344701, 'epoch': 0.2} + 20%|██ | 104/520 [07:07<25:28, 3.67s/it] 20%|██ | 105/520 [07:10<25:17, 3.66s/it] 
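The learning rates logged in these steps trace a linear warmup to a peak of 0.9 over the first 16 of 520 steps, then a cosine decay: 0.05625 at step 1, 0.9 at step 16, 0.8999912578... at step 17. A small sketch that reproduces the logged values under the usual warmup-plus-cosine formula, with the peak and step counts read directly off this log:

```python
# Reproduce the logged learning-rate schedule: linear warmup then cosine
# decay. Peak, warmup length, and total steps are read off this log.
import math

PEAK, WARMUP, TOTAL = 0.9, 16, 520

def lr_at(step: int) -> float:
    if step < WARMUP:
        return PEAK * step / WARMUP
    progress = (step - WARMUP) / (TOTAL - WARMUP)
    return PEAK * 0.5 * (1.0 + math.cos(math.pi * progress))

print(lr_at(1))   # 0.05625           (logged at step 1)
print(lr_at(16))  # 0.9               (logged at step 16)
print(lr_at(17))  # 0.8999912578...   (logged at step 17)
```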
{'loss': 2.2531, 'grad_norm': 0.0009968483509093743, 'learning_rate': 0.8325108242575692, 'epoch': 0.2} + 20%|██ | 105/520 [07:10<25:17, 3.66s/it] 20%|██ | 106/520 [07:14<25:19, 3.67s/it] {'loss': 2.8321, 'grad_norm': 0.0016366911107225668, 'learning_rate': 0.8310258896527278, 'epoch': 0.2} + 20%|██ | 106/520 [07:14<25:19, 3.67s/it] 21%|██ | 107/520 [07:18<25:17, 3.67s/it] {'loss': 2.9391, 'grad_norm': 0.001828063354404186, 'learning_rate': 0.8295261506157986, 'epoch': 0.21} + 21%|██ | 107/520 [07:18<25:17, 3.67s/it] 21%|██ | 108/520 [07:21<25:19, 3.69s/it] {'loss': 2.1639, 'grad_norm': 0.0010577461405135793, 'learning_rate': 0.8280116654178472, 'epoch': 0.21} + 21%|██ | 108/520 [07:21<25:19, 3.69s/it] 21%|██ | 109/520 [07:25<25:15, 3.69s/it] {'loss': 2.826, 'grad_norm': 0.0017935531289727975, 'learning_rate': 0.8264824929028889, 'epoch': 0.21} + 21%|██ | 109/520 [07:25<25:15, 3.69s/it] 21%|██ | 110/520 [07:29<25:08, 3.68s/it] {'loss': 2.435, 'grad_norm': 0.0011749824938823553, 'learning_rate': 0.824938692485602, 'epoch': 0.21} + 21%|██ | 110/520 [07:29<25:08, 3.68s/it] 21%|██▏ | 111/520 [07:33<25:07, 3.69s/it] {'loss': 2.4706, 'grad_norm': 0.001112647087383688, 'learning_rate': 0.8233803241490194, 'epoch': 0.21} + 21%|██▏ | 111/520 [07:33<25:07, 3.69s/it] 22%|██▏ | 112/520 [07:36<25:10, 3.70s/it] {'loss': 2.3385, 'grad_norm': 0.0010176183428253873, 'learning_rate': 0.8218074484421978, 'epoch': 0.22} + 22%|██▏ | 112/520 [07:36<25:10, 3.70s/it] 22%|██▏ | 113/520 [07:40<25:00, 3.69s/it] {'loss': 2.0786, 'grad_norm': 0.0010065492152404297, 'learning_rate': 0.820220126477865, 'epoch': 0.22} + 22%|██▏ | 113/520 [07:40<25:00, 3.69s/it] 22%|██▏ | 114/520 [07:44<25:02, 3.70s/it] {'loss': 2.2887, 'grad_norm': 0.0010036049075128662, 'learning_rate': 0.8186184199300464, 'epoch': 0.22} + 22%|██▏ | 114/520 [07:44<25:02, 3.70s/it] 22%|██▏ | 115/520 [07:47<24:57, 3.70s/it] {'loss': 2.465, 'grad_norm': 0.0009375781686224811, 'learning_rate': 0.817002391031667, 'epoch': 0.22} + 22%|██▏ | 115/520 [07:47<24:57, 3.70s/it] 22%|██▏ | 116/520 [07:51<24:49, 3.69s/it] {'loss': 2.3129, 'grad_norm': 0.0009107072508409982, 'learning_rate': 0.8153721025721355, 'epoch': 0.22} + 22%|██▏ | 116/520 [07:51<24:49, 3.69s/it] 22%|██▎ | 117/520 [07:55<24:43, 3.68s/it] {'loss': 2.3823, 'grad_norm': 0.001090703671857063, 'learning_rate': 0.8137276178949024, 'epoch': 0.23} + 22%|██▎ | 117/520 [07:55<24:43, 3.68s/it] 23%|██▎ | 118/520 [07:58<24:38, 3.68s/it] {'loss': 2.1075, 'grad_norm': 0.0009545736755928987, 'learning_rate': 0.8120690008950008, 'epoch': 0.23} + 23%|██▎ | 118/520 [07:58<24:38, 3.68s/it] 23%|██▎ | 119/520 [08:02<24:38, 3.69s/it] {'loss': 2.0754, 'grad_norm': 0.0008797079077013614, 'learning_rate': 0.8103963160165627, 'epoch': 0.23} + 23%|██▎ | 119/520 [08:02<24:38, 3.69s/it] 23%|██▎ | 120/520 [08:06<24:28, 3.67s/it] {'loss': 2.1097, 'grad_norm': 0.0010430860668922994, 'learning_rate': 0.8087096282503151, 'epoch': 0.23} + 23%|██▎ | 120/520 [08:06<24:28, 3.67s/it] 23%|██▎ | 121/520 [08:09<24:22, 3.67s/it] {'loss': 2.1605, 'grad_norm': 0.0010221220014580185, 'learning_rate': 0.8070090031310558, 'epoch': 0.23} + 23%|██▎ | 121/520 [08:09<24:22, 3.67s/it] 23%|██▎ | 122/520 [08:13<24:19, 3.67s/it] {'loss': 2.0599, 'grad_norm': 0.0009184889720171649, 'learning_rate': 0.8052945067351059, 'epoch': 0.23} + 23%|██▎ | 122/520 [08:13<24:19, 3.67s/it] 24%|██▎ | 123/520 [08:17<24:17, 3.67s/it] {'loss': 2.785, 'grad_norm': 0.0013181883461516218, 'learning_rate': 0.8035662056777432, 'epoch': 0.24} + 24%|██▎ | 123/520 [08:17<24:17, 
3.67s/it] 24%|██▍ | 124/520 [08:20<24:10, 3.66s/it] {'loss': 2.2225, 'grad_norm': 0.0010774547478248227, 'learning_rate': 0.8018241671106134, 'epoch': 0.24} + 24%|██▍ | 124/520 [08:20<24:10, 3.66s/it] 24%|██▍ | 125/520 [08:24<24:13, 3.68s/it] {'loss': 2.0999, 'grad_norm': 0.0009385758385206525, 'learning_rate': 0.8000684587191217, 'epoch': 0.24} + 24%|██▍ | 125/520 [08:24<24:13, 3.68s/it] 24%|██▍ | 126/520 [08:28<25:32, 3.89s/it] {'loss': 2.4672, 'grad_norm': 0.0010781582458191613, 'learning_rate': 0.7982991487198023, 'epoch': 0.24} + 24%|██▍ | 126/520 [08:28<25:32, 3.89s/it] 24%|██▍ | 127/520 [08:32<25:07, 3.83s/it] {'loss': 2.1578, 'grad_norm': 0.0009231689160720394, 'learning_rate': 0.7965163058576683, 'epoch': 0.24} + 24%|██▍ | 127/520 [08:32<25:07, 3.83s/it] 25%|██▍ | 128/520 [08:36<24:42, 3.78s/it] {'loss': 2.1725, 'grad_norm': 0.000905593027059816, 'learning_rate': 0.7947199994035401, 'epoch': 0.25} + 25%|██▍ | 128/520 [08:36<24:42, 3.78s/it] 25%|██▍ | 129/520 [08:39<24:24, 3.75s/it] {'loss': 2.0126, 'grad_norm': 0.0007557578080051694, 'learning_rate': 0.7929102991513549, 'epoch': 0.25} + 25%|██▍ | 129/520 [08:39<24:24, 3.75s/it] 25%|██▌ | 130/520 [08:43<24:14, 3.73s/it] {'loss': 2.1581, 'grad_norm': 0.0008921437284691687, 'learning_rate': 0.7910872754154539, 'epoch': 0.25} + 25%|██▌ | 130/520 [08:43<24:14, 3.73s/it] 25%|██▌ | 131/520 [08:47<24:08, 3.72s/it] {'loss': 2.5177, 'grad_norm': 0.001021000969661411, 'learning_rate': 0.7892509990278509, 'epoch': 0.25} + 25%|██▌ | 131/520 [08:47<24:08, 3.72s/it] 25%|██▌ | 132/520 [08:50<23:54, 3.70s/it] {'loss': 2.1704, 'grad_norm': 0.0009104771303856912, 'learning_rate': 0.7874015413354805, 'epoch': 0.25} + 25%|██▌ | 132/520 [08:50<23:54, 3.70s/it] 26%|██▌ | 133/520 [08:54<23:51, 3.70s/it] {'loss': 2.1134, 'grad_norm': 0.0009346001826017232, 'learning_rate': 0.7855389741974245, 'epoch': 0.26} + 26%|██▌ | 133/520 [08:54<23:51, 3.70s/it] 26%|██▌ | 134/520 [08:58<23:44, 3.69s/it] {'loss': 2.1684, 'grad_norm': 0.0011896998792915343, 'learning_rate': 0.783663369982122, 'epoch': 0.26} + 26%|██▌ | 134/520 [08:58<23:44, 3.69s/it] 26%|██▌ | 135/520 [09:02<23:40, 3.69s/it] {'loss': 2.2721, 'grad_norm': 0.0008784386723141482, 'learning_rate': 0.7817748015645558, 'epoch': 0.26} + 26%|██▌ | 135/520 [09:02<23:40, 3.69s/it] 26%|██▌ | 136/520 [09:05<23:44, 3.71s/it] {'loss': 2.1448, 'grad_norm': 0.0008824729959425378, 'learning_rate': 0.7798733423234219, 'epoch': 0.26} + 26%|██▌ | 136/520 [09:05<23:44, 3.71s/it] 26%|██▋ | 137/520 [09:09<23:43, 3.72s/it] {'loss': 2.1744, 'grad_norm': 0.000949304175968924, 'learning_rate': 0.7779590661382778, 'epoch': 0.26} + 26%|██▋ | 137/520 [09:09<23:43, 3.72s/it] 27%|██▋ | 138/520 [09:13<23:40, 3.72s/it] {'loss': 2.0463, 'grad_norm': 0.0007919084245499726, 'learning_rate': 0.7760320473866728, 'epoch': 0.27} + 27%|██▋ | 138/520 [09:13<23:40, 3.72s/it] 27%|██▋ | 139/520 [09:16<23:38, 3.72s/it] {'loss': 2.3596, 'grad_norm': 0.0013448444662861694, 'learning_rate': 0.774092360941257, 'epoch': 0.27} + 27%|██▋ | 139/520 [09:16<23:38, 3.72s/it] 27%|██▋ | 140/520 [09:20<23:30, 3.71s/it] {'loss': 2.5848, 'grad_norm': 0.001081667686944617, 'learning_rate': 0.7721400821668734, 'epoch': 0.27} + 27%|██▋ | 140/520 [09:20<23:30, 3.71s/it] 27%|██▋ | 141/520 [09:24<23:31, 3.72s/it] {'loss': 2.1404, 'grad_norm': 0.0009203149012128669, 'learning_rate': 0.7701752869176286, 'epoch': 0.27} + 27%|██▋ | 141/520 [09:24<23:31, 3.72s/it] 27%|██▋ | 142/520 [09:28<23:23, 3.71s/it] {'loss': 2.5489, 'grad_norm': 0.0012430967200721388, 'learning_rate': 
0.7681980515339464, 'epoch': 0.27} + 27%|██▋ | 142/520 [09:28<23:23, 3.71s/it] 28%|██▊ | 143/520 [09:31<23:18, 3.71s/it] {'loss': 2.141, 'grad_norm': 0.0009956788980513885, 'learning_rate': 0.7662084528396012, 'epoch': 0.28} + 28%|██▊ | 143/520 [09:31<23:18, 3.71s/it] 28%|██▊ | 144/520 [09:35<23:12, 3.70s/it] {'loss': 1.9815, 'grad_norm': 0.000932961467884054, 'learning_rate': 0.7642065681387328, 'epoch': 0.28} + 28%|██▊ | 144/520 [09:35<23:12, 3.70s/it] 28%|██▊ | 145/520 [09:39<23:12, 3.71s/it] {'loss': 1.9167, 'grad_norm': 0.0008526142755273127, 'learning_rate': 0.7621924752128437, 'epoch': 0.28} + 28%|██▊ | 145/520 [09:39<23:12, 3.71s/it] 28%|██▊ | 146/520 [09:42<23:12, 3.72s/it] {'loss': 2.5863, 'grad_norm': 0.0010129843602107794, 'learning_rate': 0.7601662523177761, 'epoch': 0.28} + 28%|██▊ | 146/520 [09:42<23:12, 3.72s/it] 28%|██▊ | 147/520 [09:46<23:10, 3.73s/it] {'loss': 1.9445, 'grad_norm': 0.0008885177160608492, 'learning_rate': 0.7581279781806721, 'epoch': 0.28} + 28%|██▊ | 147/520 [09:46<23:10, 3.73s/it] 28%|██▊ | 148/520 [09:50<23:09, 3.73s/it] {'loss': 2.0933, 'grad_norm': 0.000890819259281516, 'learning_rate': 0.7560777319969136, 'epoch': 0.28} + 28%|██▊ | 148/520 [09:50<23:09, 3.73s/it] 29%|██▊ | 149/520 [09:54<23:05, 3.74s/it] {'loss': 2.0245, 'grad_norm': 0.001112797243661204, 'learning_rate': 0.7540155934270472, 'epoch': 0.29} + 29%|██▊ | 149/520 [09:54<23:05, 3.74s/it] 29%|██▉ | 150/520 [09:57<22:59, 3.73s/it] {'loss': 2.2202, 'grad_norm': 0.0008743273763653854, 'learning_rate': 0.7519416425936866, 'epoch': 0.29} + 29%|██▉ | 150/520 [09:57<22:59, 3.73s/it] 29%|██▉ | 151/520 [10:01<22:53, 3.72s/it] {'loss': 2.0172, 'grad_norm': 0.0009004222871767154, 'learning_rate': 0.7498559600784018, 'epoch': 0.29} + 29%|██▉ | 151/520 [10:01<22:53, 3.72s/it] 29%|██▉ | 152/520 [10:05<22:52, 3.73s/it] {'loss': 2.0011, 'grad_norm': 0.000901612844571866, 'learning_rate': 0.7477586269185867, 'epoch': 0.29} + 29%|██▉ | 152/520 [10:05<22:52, 3.73s/it] 29%|██▉ | 153/520 [10:09<22:42, 3.71s/it] {'loss': 2.0048, 'grad_norm': 0.0008357758917088006, 'learning_rate': 0.7456497246043112, 'epoch': 0.29} + 29%|██▉ | 153/520 [10:09<22:42, 3.71s/it] 30%|██▉ | 154/520 [10:12<22:43, 3.73s/it] {'loss': 2.1426, 'grad_norm': 0.0007841757328401262, 'learning_rate': 0.7435293350751545, 'epoch': 0.3} + 30%|██▉ | 154/520 [10:12<22:43, 3.73s/it] 30%|██▉ | 155/520 [10:16<22:36, 3.72s/it] {'loss': 1.9916, 'grad_norm': 0.0010028546264578063, 'learning_rate': 0.7413975407170217, 'epoch': 0.3} + 30%|██▉ | 155/520 [10:16<22:36, 3.72s/it] 30%|███ | 156/520 [10:20<22:36, 3.73s/it] {'loss': 2.1141, 'grad_norm': 0.0010782238964395885, 'learning_rate': 0.7392544243589427, 'epoch': 0.3} + 30%|███ | 156/520 [10:20<22:36, 3.73s/it] 30%|███ | 157/520 [10:23<22:31, 3.72s/it] {'loss': 2.5743, 'grad_norm': 0.0011035173217304676, 'learning_rate': 0.7371000692698539, 'epoch': 0.3} + 30%|███ | 157/520 [10:23<22:31, 3.72s/it] 30%|███ | 158/520 [10:27<22:27, 3.72s/it] {'loss': 2.0558, 'grad_norm': 0.0008848816332947422, 'learning_rate': 0.734934559155363, 'epoch': 0.3} + 30%|███ | 158/520 [10:27<22:27, 3.72s/it] 31%|███ | 159/520 [10:31<22:19, 3.71s/it] {'loss': 2.0231, 'grad_norm': 0.000772341476967771, 'learning_rate': 0.7327579781544963, 'epoch': 0.31} + 31%|███ | 159/520 [10:31<22:19, 3.71s/it] 31%|███ | 160/520 [10:35<22:15, 3.71s/it] {'loss': 2.0254, 'grad_norm': 0.0009105255444166573, 'learning_rate': 0.7305704108364302, 'epoch': 0.31} + 31%|███ | 160/520 [10:35<22:15, 3.71s/it] 31%|███ | 161/520 [10:38<22:09, 3.70s/it] 
{'loss': 2.0844, 'grad_norm': 0.0009446303762241928, 'learning_rate': 0.7283719421972047, 'epoch': 0.31} + 31%|███ | 161/520 [10:38<22:09, 3.70s/it] 31%|███ | 162/520 [10:42<22:00, 3.69s/it] {'loss': 2.4613, 'grad_norm': 0.0015152963413655504, 'learning_rate': 0.7261626576564214, 'epoch': 0.31} + 31%|███ | 162/520 [10:42<22:00, 3.69s/it] 31%|███▏ | 163/520 [10:46<22:02, 3.70s/it] {'loss': 2.0046, 'grad_norm': 0.001183600197708171, 'learning_rate': 0.7239426430539243, 'epoch': 0.31} + 31%|███▏ | 163/520 [10:46<22:02, 3.70s/it] 32%|███▏ | 164/520 [10:49<21:58, 3.70s/it] {'loss': 1.8679, 'grad_norm': 0.0008821054003986865, 'learning_rate': 0.7217119846464648, 'epoch': 0.32} + 32%|███▏ | 164/520 [10:49<21:58, 3.70s/it] 32%|███▏ | 165/520 [10:53<21:55, 3.70s/it] {'loss': 1.9693, 'grad_norm': 0.000852566828617237, 'learning_rate': 0.7194707691043501, 'epoch': 0.32} + 32%|███▏ | 165/520 [10:53<21:55, 3.70s/it] 32%|███▏ | 166/520 [10:57<21:48, 3.70s/it] {'loss': 2.0366, 'grad_norm': 0.0008668080532719676, 'learning_rate': 0.7172190835080757, 'epoch': 0.32} + 32%|███▏ | 166/520 [10:57<21:48, 3.70s/it] 32%|███▏ | 167/520 [11:00<21:43, 3.69s/it] {'loss': 1.9946, 'grad_norm': 0.0008731478711179118, 'learning_rate': 0.7149570153449422, 'epoch': 0.32} + 32%|███▏ | 167/520 [11:00<21:43, 3.69s/it] 32%|███▏ | 168/520 [11:04<21:40, 3.69s/it] {'loss': 1.9479, 'grad_norm': 0.0008397275938522689, 'learning_rate': 0.7126846525056555, 'epoch': 0.32} + 32%|███▏ | 168/520 [11:04<21:40, 3.69s/it] 32%|███▎ | 169/520 [11:08<21:34, 3.69s/it] {'loss': 1.9862, 'grad_norm': 0.0007184251330808772, 'learning_rate': 0.7104020832809127, 'epoch': 0.33} + 32%|███▎ | 169/520 [11:08<21:34, 3.69s/it] 33%|███▎ | 170/520 [11:11<21:28, 3.68s/it] {'loss': 2.3183, 'grad_norm': 0.0010412166660542252, 'learning_rate': 0.7081093963579708, 'epoch': 0.33} + 33%|███▎ | 170/520 [11:11<21:28, 3.68s/it] 33%|███▎ | 171/520 [11:15<21:24, 3.68s/it] {'loss': 1.9093, 'grad_norm': 0.0009930137686256536, 'learning_rate': 0.7058066808172017, 'epoch': 0.33} + 33%|███▎ | 171/520 [11:15<21:24, 3.68s/it] 33%|███▎ | 172/520 [11:19<21:16, 3.67s/it] {'loss': 2.0378, 'grad_norm': 0.000786851702610843, 'learning_rate': 0.7034940261286299, 'epoch': 0.33} + 33%|███▎ | 172/520 [11:19<21:16, 3.67s/it] 33%|███▎ | 173/520 [11:22<21:13, 3.67s/it] {'loss': 1.8872, 'grad_norm': 0.0007733636367682381, 'learning_rate': 0.7011715221484579, 'epoch': 0.33} + 33%|███▎ | 173/520 [11:22<21:13, 3.67s/it] 33%|███▎ | 174/520 [11:26<21:08, 3.67s/it] {'loss': 2.0327, 'grad_norm': 0.0008705903653652881, 'learning_rate': 0.6988392591155728, 'epoch': 0.33} + 33%|███▎ | 174/520 [11:26<21:08, 3.67s/it] 34%|███▎ | 175/520 [11:30<21:08, 3.68s/it] {'loss': 1.9141, 'grad_norm': 0.0008275874656867292, 'learning_rate': 0.6964973276480422, 'epoch': 0.34} + 34%|███▎ | 175/520 [11:30<21:08, 3.68s/it] 34%|███▍ | 176/520 [11:34<21:11, 3.70s/it] {'loss': 2.4662, 'grad_norm': 0.0010263010130768216, 'learning_rate': 0.6941458187395918, 'epoch': 0.34} + 34%|███▍ | 176/520 [11:34<21:11, 3.70s/it] 34%|███▍ | 177/520 [11:37<21:03, 3.68s/it] {'loss': 2.2681, 'grad_norm': 0.0010349140524422117, 'learning_rate': 0.6917848237560709, 'epoch': 0.34} + 34%|███▍ | 177/520 [11:37<21:03, 3.68s/it] 34%|███▍ | 178/520 [11:41<20:56, 3.67s/it] {'loss': 1.9913, 'grad_norm': 0.0008580672163658215, 'learning_rate': 0.6894144344319014, 'epoch': 0.34} + 34%|███▍ | 178/520 [11:41<20:56, 3.67s/it] 34%|███▍ | 179/520 [11:45<20:51, 3.67s/it] {'loss': 2.0761, 'grad_norm': 0.0008520065451612534, 'learning_rate': 
0.6870347428665153, 'epoch': 0.34} + 34%|███▍ | 179/520 [11:45<20:51, 3.67s/it] 35%|███▍ | 180/520 [11:48<20:50, 3.68s/it] {'loss': 1.9952, 'grad_norm': 0.0008110323395387569, 'learning_rate': 0.6846458415207741, 'epoch': 0.35} + 35%|███▍ | 180/520 [11:48<20:50, 3.68s/it] 35%|███▍ | 181/520 [11:52<21:04, 3.73s/it] {'loss': 1.9558, 'grad_norm': 0.0009628964381427051, 'learning_rate': 0.682247823213378, 'epoch': 0.35} + 35%|███▍ | 181/520 [11:52<21:04, 3.73s/it] 35%|███▌ | 182/520 [11:56<21:09, 3.76s/it] {'loss': 2.0629, 'grad_norm': 0.00089848234673862, 'learning_rate': 0.6798407811172587, 'epoch': 0.35} + 35%|███▌ | 182/520 [11:56<21:09, 3.76s/it] 35%|███▌ | 183/520 [12:00<21:00, 3.74s/it] {'loss': 1.9864, 'grad_norm': 0.0007732770348726383, 'learning_rate': 0.6774248087559589, 'epoch': 0.35} + 35%|███▌ | 183/520 [12:00<21:00, 3.74s/it] 35%|███▌ | 184/520 [12:03<20:57, 3.74s/it] {'loss': 1.8921, 'grad_norm': 0.0009844553899520502, 'learning_rate': 0.675, 'epoch': 0.35} + 35%|███▌ | 184/520 [12:03<20:57, 3.74s/it] 36%|███▌ | 185/520 [12:07<20:50, 3.73s/it] {'loss': 2.1542, 'grad_norm': 0.0008419081687678351, 'learning_rate': 0.6725664490632334, 'epoch': 0.36} + 36%|███▌ | 185/520 [12:07<20:50, 3.73s/it] 36%|███▌ | 186/520 [12:11<20:43, 3.72s/it] {'loss': 1.8921, 'grad_norm': 0.0008025146579071201, 'learning_rate': 0.6701242504991802, 'epoch': 0.36} + 36%|███▌ | 186/520 [12:11<20:43, 3.72s/it] 36%|███▌ | 187/520 [12:14<20:41, 3.73s/it] {'loss': 1.972, 'grad_norm': 0.0009076286911435655, 'learning_rate': 0.667673499197358, 'epoch': 0.36} + 36%|███▌ | 187/520 [12:14<20:41, 3.73s/it] 36%|███▌ | 188/520 [12:18<20:28, 3.70s/it] {'loss': 1.9848, 'grad_norm': 0.000750704558204172, 'learning_rate': 0.6652142903795932, 'epoch': 0.36} + 36%|███▌ | 188/520 [12:18<20:28, 3.70s/it] 36%|███▋ | 189/520 [12:22<20:22, 3.69s/it] {'loss': 2.0473, 'grad_norm': 0.0008290317110576817, 'learning_rate': 0.6627467195963223, 'epoch': 0.36} + 36%|███▋ | 189/520 [12:22<20:22, 3.69s/it] 37%|███▋ | 190/520 [12:25<20:15, 3.68s/it] {'loss': 1.961, 'grad_norm': 0.000890771166569762, 'learning_rate': 0.6602708827228779, 'epoch': 0.37} + 37%|███▋ | 190/520 [12:25<20:15, 3.68s/it] 37%|███▋ | 191/520 [12:29<20:17, 3.70s/it] {'loss': 1.8939, 'grad_norm': 0.000844987439946606, 'learning_rate': 0.6577868759557653, 'epoch': 0.37} + 37%|███▋ | 191/520 [12:29<20:17, 3.70s/it] 37%|███▋ | 192/520 [12:33<20:17, 3.71s/it] {'loss': 2.0042, 'grad_norm': 0.0008594269745248715, 'learning_rate': 0.6552947958089234, 'epoch': 0.37} + 37%|███▋ | 192/520 [12:33<20:17, 3.71s/it] 37%|███▋ | 193/520 [12:37<20:08, 3.70s/it] {'loss': 2.4007, 'grad_norm': 0.0010334377026623442, 'learning_rate': 0.6527947391099754, 'epoch': 0.37} + 37%|███▋ | 193/520 [12:37<20:08, 3.70s/it] 37%|███▋ | 194/520 [12:40<20:08, 3.71s/it] {'loss': 2.1847, 'grad_norm': 0.0009324362624120524, 'learning_rate': 0.6502868029964665, 'epoch': 0.37} + 37%|███▋ | 194/520 [12:40<20:08, 3.71s/it] 38%|███▊ | 195/520 [12:44<19:59, 3.69s/it] {'loss': 1.9361, 'grad_norm': 0.0008068886399712649, 'learning_rate': 0.6477710849120902, 'epoch': 0.38} + 38%|███▊ | 195/520 [12:44<19:59, 3.69s/it] 38%|███▊ | 196/520 [12:48<19:52, 3.68s/it] {'loss': 1.9706, 'grad_norm': 0.0008147686337532004, 'learning_rate': 0.6452476826029011, 'epoch': 0.38} + 38%|███▊ | 196/520 [12:48<19:52, 3.68s/it] 38%|███▊ | 197/520 [12:51<19:52, 3.69s/it] {'loss': 1.8882, 'grad_norm': 0.0007857696433160351, 'learning_rate': 0.6427166941135182, 'epoch': 0.38} + 38%|███▊ | 197/520 [12:51<19:52, 3.69s/it] 38%|███▊ | 198/520 
[12:55<19:44, 3.68s/it] {'loss': 2.0729, 'grad_norm': 0.0008861005508345118, 'learning_rate': 0.6401782177833147, 'epoch': 0.38} + 38%|███▊ | 198/520 [12:55<19:44, 3.68s/it] 38%|███▊ | 199/520 [12:59<19:36, 3.67s/it] {'loss': 1.9328, 'grad_norm': 0.0008576054735654452, 'learning_rate': 0.6376323522425977, 'epoch': 0.38} + 38%|███▊ | 199/520 [12:59<19:36, 3.67s/it] 38%|███▊ | 200/520 [13:02<19:31, 3.66s/it] {'loss': 2.2545, 'grad_norm': 0.0009888454972801363, 'learning_rate': 0.6350791964087753, 'epoch': 0.38} + 38%|███▊ | 200/520 [13:02<19:31, 3.66s/it] 39%|███▊ | 201/520 [13:06<19:31, 3.67s/it] {'loss': 2.1864, 'grad_norm': 0.0008940188929834016, 'learning_rate': 0.6325188494825138, 'epoch': 0.39} + 39%|███▊ | 201/520 [13:06<19:31, 3.67s/it] 39%|███▉ | 202/520 [13:10<19:34, 3.69s/it] {'loss': 1.9325, 'grad_norm': 0.0009186076304524333, 'learning_rate': 0.6299514109438834, 'epoch': 0.39} + 39%|███▉ | 202/520 [13:10<19:34, 3.69s/it] 39%|███▉ | 203/520 [13:13<19:34, 3.71s/it] {'loss': 1.9916, 'grad_norm': 0.0008418636793589293, 'learning_rate': 0.6273769805484928, 'epoch': 0.39} + 39%|███▉ | 203/520 [13:13<19:34, 3.71s/it] 39%|███▉ | 204/520 [13:17<19:29, 3.70s/it] {'loss': 2.0684, 'grad_norm': 0.0008404108221184476, 'learning_rate': 0.6247956583236127, 'epoch': 0.39} + 39%|███▉ | 204/520 [13:17<19:29, 3.70s/it] 39%|███▉ | 205/520 [13:21<19:26, 3.70s/it] {'loss': 2.2955, 'grad_norm': 0.0009668817600404162, 'learning_rate': 0.6222075445642904, 'epoch': 0.39} + 39%|███▉ | 205/520 [13:21<19:26, 3.70s/it] 40%|███▉ | 206/520 [13:25<19:25, 3.71s/it] {'loss': 2.0357, 'grad_norm': 0.0008194283529962855, 'learning_rate': 0.6196127398294524, 'epoch': 0.4} + 40%|███▉ | 206/520 [13:25<19:25, 3.71s/it] 40%|███▉ | 207/520 [13:28<19:16, 3.70s/it] {'loss': 2.2276, 'grad_norm': 0.0008454167679523339, 'learning_rate': 0.617011344937997, 'epoch': 0.4} + 40%|███▉ | 207/520 [13:28<19:16, 3.70s/it] 40%|████ | 208/520 [13:32<19:08, 3.68s/it] {'loss': 1.9641, 'grad_norm': 0.0009114698485607296, 'learning_rate': 0.6144034609648779, 'epoch': 0.4} + 40%|████ | 208/520 [13:32<19:08, 3.68s/it] 40%|████ | 209/520 [13:36<19:02, 3.67s/it] {'loss': 1.9699, 'grad_norm': 0.0008048987065141168, 'learning_rate': 0.6117891892371754, 'epoch': 0.4} + 40%|████ | 209/520 [13:36<19:02, 3.67s/it] 40%|████ | 210/520 [13:39<18:54, 3.66s/it] {'loss': 1.9253, 'grad_norm': 0.0008180169232832024, 'learning_rate': 0.6091686313301616, 'epoch': 0.4} + 40%|████ | 210/520 [13:39<18:54, 3.66s/it] 41%|████ | 211/520 [13:43<18:55, 3.68s/it] {'loss': 1.9681, 'grad_norm': 0.0007037975688110987, 'learning_rate': 0.6065418890633522, 'epoch': 0.41} + 41%|████ | 211/520 [13:43<18:55, 3.68s/it] 41%|████ | 212/520 [13:47<18:54, 3.68s/it] {'loss': 1.9107, 'grad_norm': 0.0008121678959267147, 'learning_rate': 0.603909064496551, 'epoch': 0.41} + 41%|████ | 212/520 [13:47<18:54, 3.68s/it] 41%|████ | 213/520 [13:50<18:51, 3.68s/it] {'loss': 1.9772, 'grad_norm': 0.0008896369765021791, 'learning_rate': 0.6012702599258839, 'epoch': 0.41} + 41%|████ | 213/520 [13:50<18:51, 3.68s/it] 41%|████ | 214/520 [13:54<18:45, 3.68s/it] {'loss': 2.0116, 'grad_norm': 0.0009214534972542781, 'learning_rate': 0.5986255778798253, 'epoch': 0.41} + 41%|████ | 214/520 [13:54<18:45, 3.68s/it] 41%|████▏ | 215/520 [13:58<18:40, 3.67s/it] {'loss': 2.1212, 'grad_norm': 0.0009434612343925121, 'learning_rate': 0.5959751211152132, 'epoch': 0.41} + 41%|████▏ | 215/520 [13:58<18:40, 3.67s/it] 42%|████▏ | 216/520 [14:01<18:34, 3.67s/it] {'loss': 1.841, 'grad_norm': 0.0007786330904650452, 
'learning_rate': 0.593318992613258, 'epoch': 0.42} + 42%|████▏ | 216/520 [14:01<18:34, 3.67s/it] 42%|████▏ | 217/520 [14:05<18:30, 3.66s/it] {'loss': 1.9723, 'grad_norm': 0.000806579762987819, 'learning_rate': 0.59065729557554, 'epoch': 0.42} + 42%|████▏ | 217/520 [14:05<18:30, 3.66s/it] 42%|████▏ | 218/520 [14:09<18:27, 3.67s/it] {'loss': 2.004, 'grad_norm': 0.0008497679901121696, 'learning_rate': 0.5879901334200005, 'epoch': 0.42} + 42%|████▏ | 218/520 [14:09<18:27, 3.67s/it] 42%|████▏ | 219/520 [14:12<18:31, 3.69s/it] {'loss': 1.8661, 'grad_norm': 0.0006694705044934289, 'learning_rate': 0.5853176097769228, 'epoch': 0.42} + 42%|████▏ | 219/520 [14:12<18:31, 3.69s/it] 42%|████▏ | 220/520 [14:16<18:30, 3.70s/it] {'loss': 2.1958, 'grad_norm': 0.001189075312405104, 'learning_rate': 0.5826398284849069, 'epoch': 0.42} + 42%|████▏ | 220/520 [14:16<18:30, 3.70s/it] 42%|████▎ | 221/520 [14:20<18:26, 3.70s/it] {'loss': 1.9151, 'grad_norm': 0.0007410195615631299, 'learning_rate': 0.5799568935868334, 'epoch': 0.42} + 42%|████▎ | 221/520 [14:20<18:26, 3.70s/it] 43%|████▎ | 222/520 [14:23<18:21, 3.70s/it] {'loss': 1.8131, 'grad_norm': 0.0007867162900161899, 'learning_rate': 0.5772689093258224, 'epoch': 0.43} + 43%|████▎ | 222/520 [14:23<18:21, 3.70s/it] 43%|████▎ | 223/520 [14:27<18:14, 3.69s/it] {'loss': 1.7954, 'grad_norm': 0.0007285072179251573, 'learning_rate': 0.5745759801411822, 'epoch': 0.43} + 43%|████▎ | 223/520 [14:27<18:14, 3.69s/it] 43%|████▎ | 224/520 [14:31<18:13, 3.69s/it] {'loss': 2.7571, 'grad_norm': 0.0012184531251411906, 'learning_rate': 0.5718782106643524, 'epoch': 0.43} + 43%|████▎ | 224/520 [14:31<18:13, 3.69s/it] 43%|████▎ | 225/520 [14:35<18:10, 3.70s/it] {'loss': 1.8648, 'grad_norm': 0.0007950287325062754, 'learning_rate': 0.5691757057148372, 'epoch': 0.43} + 43%|████▎ | 225/520 [14:35<18:10, 3.70s/it] 43%|████▎ | 226/520 [14:38<18:04, 3.69s/it] {'loss': 1.9412, 'grad_norm': 0.0007845050389188664, 'learning_rate': 0.5664685702961344, 'epoch': 0.43} + 43%|████▎ | 226/520 [14:38<18:04, 3.69s/it] 44%|████▎ | 227/520 [14:42<18:08, 3.72s/it] {'loss': 1.897, 'grad_norm': 0.0008185252881038252, 'learning_rate': 0.5637569095916538, 'epoch': 0.44} + 44%|████▎ | 227/520 [14:42<18:08, 3.72s/it] 44%|████▍ | 228/520 [14:46<18:07, 3.73s/it] {'loss': 2.3831, 'grad_norm': 0.0010275892661989888, 'learning_rate': 0.5610408289606321, 'epoch': 0.44} + 44%|████▍ | 228/520 [14:46<18:07, 3.73s/it] 44%|████▍ | 229/520 [14:49<18:02, 3.72s/it] {'loss': 1.9187, 'grad_norm': 0.0007356748073959116, 'learning_rate': 0.558320433934038, 'epoch': 0.44} + 44%|████▍ | 229/520 [14:49<18:02, 3.72s/it] 44%|████▍ | 230/520 [14:53<18:03, 3.73s/it] {'loss': 1.7917, 'grad_norm': 0.0007839190739322376, 'learning_rate': 0.5555958302104719, 'epoch': 0.44} + 44%|████▍ | 230/520 [14:53<18:03, 3.73s/it] 44%|████▍ | 231/520 [14:57<17:56, 3.72s/it] {'loss': 1.8784, 'grad_norm': 0.0007814434043817631, 'learning_rate': 0.5528671236520604, 'epoch': 0.44} + 44%|████▍ | 231/520 [14:57<17:56, 3.72s/it] 45%|████▍ | 232/520 [15:01<17:47, 3.71s/it] {'loss': 2.4338, 'grad_norm': 0.0009864574339154327, 'learning_rate': 0.5501344202803414, 'epoch': 0.45} + 45%|████▍ | 232/520 [15:01<17:47, 3.71s/it] 45%|████▍ | 233/520 [15:04<17:42, 3.70s/it] {'loss': 2.2383, 'grad_norm': 0.0009698660802950521, 'learning_rate': 0.5473978262721463, 'epoch': 0.45} + 45%|████▍ | 233/520 [15:04<17:42, 3.70s/it] 45%|████▌ | 234/520 [15:08<17:36, 3.70s/it] {'loss': 1.7962, 'grad_norm': 0.0008654600442003826, 'learning_rate': 0.5446574479554731, 'epoch': 0.45} 
+ 45%|████▌ | 234/520 [15:08<17:36, 3.70s/it] 45%|████▌ | 235/520 [15:12<17:35, 3.70s/it] {'loss': 1.858, 'grad_norm': 0.0007850716441304986, 'learning_rate': 0.5419133918053562, 'epoch': 0.45} + 45%|████▌ | 235/520 [15:12<17:35, 3.70s/it] 45%|████▌ | 236/520 [15:15<17:32, 3.70s/it] {'loss': 2.0151, 'grad_norm': 0.0008772983867710992, 'learning_rate': 0.539165764439729, 'epoch': 0.45} + 45%|████▌ | 236/520 [15:15<17:32, 3.70s/it] 46%|████▌ | 237/520 [15:19<17:24, 3.69s/it] {'loss': 1.9303, 'grad_norm': 0.0008107658820742155, 'learning_rate': 0.5364146726152813, 'epoch': 0.46} + 46%|████▌ | 237/520 [15:19<17:24, 3.69s/it] 46%|████▌ | 238/520 [15:23<17:16, 3.68s/it] {'loss': 1.8641, 'grad_norm': 0.0008221403702482364, 'learning_rate': 0.5336602232233116, 'epoch': 0.46} + 46%|████▌ | 238/520 [15:23<17:16, 3.68s/it] 46%|████▌ | 239/520 [15:26<17:09, 3.66s/it] {'loss': 2.0444, 'grad_norm': 0.0008413781329630385, 'learning_rate': 0.5309025232855737, 'epoch': 0.46} + 46%|████▌ | 239/520 [15:26<17:09, 3.66s/it] 46%|████▌ | 240/520 [15:30<17:08, 3.67s/it] {'loss': 1.6725, 'grad_norm': 0.0006853270645898597, 'learning_rate': 0.5281416799501187, 'epoch': 0.46} + 46%|████▌ | 240/520 [15:30<17:08, 3.67s/it] 46%|████▋ | 241/520 [15:34<17:06, 3.68s/it] {'loss': 1.7545, 'grad_norm': 0.0008554811240962374, 'learning_rate': 0.5253778004871315, 'epoch': 0.46} + 46%|████▋ | 241/520 [15:34<17:06, 3.68s/it] 47%|████▋ | 242/520 [15:37<17:02, 3.68s/it] {'loss': 1.8359, 'grad_norm': 0.000720970086300521, 'learning_rate': 0.522610992284763, 'epoch': 0.47} + 47%|████▋ | 242/520 [15:37<17:02, 3.68s/it] 47%|████▋ | 243/520 [15:41<17:02, 3.69s/it] {'loss': 1.8447, 'grad_norm': 0.0008067028865382759, 'learning_rate': 0.5198413628449582, 'epoch': 0.47} + 47%|████▋ | 243/520 [15:41<17:02, 3.69s/it] 47%|████▋ | 244/520 [15:45<16:58, 3.69s/it] {'loss': 2.0229, 'grad_norm': 0.0007913038537376786, 'learning_rate': 0.5170690197792784, 'epoch': 0.47} + 47%|████▋ | 244/520 [15:45<16:58, 3.69s/it] 47%|████▋ | 245/520 [15:48<16:52, 3.68s/it] {'loss': 1.8527, 'grad_norm': 0.0008397760713066965, 'learning_rate': 0.514294070804721, 'epoch': 0.47} + 47%|████▋ | 245/520 [15:48<16:52, 3.68s/it] 47%|████▋ | 246/520 [15:52<16:46, 3.67s/it] {'loss': 2.4039, 'grad_norm': 0.0010530799465505653, 'learning_rate': 0.5115166237395331, 'epoch': 0.47} + 47%|████▋ | 246/520 [15:52<16:46, 3.67s/it] 48%|████▊ | 247/520 [15:56<16:40, 3.67s/it] {'loss': 2.0139, 'grad_norm': 0.0007315085262973831, 'learning_rate': 0.5087367864990233, 'epoch': 0.47} + 48%|████▊ | 247/520 [15:56<16:40, 3.67s/it] 48%|████▊ | 248/520 [15:59<16:36, 3.66s/it] {'loss': 1.7809, 'grad_norm': 0.0008674020195134361, 'learning_rate': 0.5059546670913684, 'epoch': 0.48} + 48%|████▊ | 248/520 [15:59<16:36, 3.66s/it] 48%|████▊ | 249/520 [16:03<16:32, 3.66s/it] {'loss': 1.9772, 'grad_norm': 0.0008732450087365472, 'learning_rate': 0.5031703736134169, 'epoch': 0.48} + 48%|████▊ | 249/520 [16:03<16:32, 3.66s/it] 48%|████▊ | 250/520 [16:07<16:28, 3.66s/it] {'loss': 1.8796, 'grad_norm': 0.0008079278981100394, 'learning_rate': 0.5003840142464886, 'epoch': 0.48} + 48%|████▊ | 250/520 [16:07<16:28, 3.66s/it] 48%|████▊ | 251/520 [16:10<16:27, 3.67s/it] {'loss': 1.9202, 'grad_norm': 0.0007053882138103529, 'learning_rate': 0.49759569725217206, 'epoch': 0.48} + 48%|████▊ | 251/520 [16:10<16:27, 3.67s/it] 48%|████▊ | 252/520 [16:14<16:23, 3.67s/it] {'loss': 2.1927, 'grad_norm': 0.0008196131047179969, 'learning_rate': 0.4948055309681175, 'epoch': 0.48} + 48%|████▊ | 252/520 [16:14<16:23, 3.67s/it] 
49%|████▊ | 253/520 [16:18<16:24, 3.69s/it] {'loss': 1.9459, 'grad_norm': 0.000891959245451319, 'learning_rate': 0.49201362380382774, 'epoch': 0.49} + 49%|████▊ | 253/520 [16:18<16:24, 3.69s/it] 49%|████▉ | 254/520 [16:21<16:18, 3.68s/it] {'loss': 1.8008, 'grad_norm': 0.00077364108314024, 'learning_rate': 0.48922008423644625, 'epoch': 0.49} + 49%|████▉ | 254/520 [16:21<16:18, 3.68s/it] 49%|████▉ | 255/520 [16:25<16:22, 3.71s/it] {'loss': 1.8489, 'grad_norm': 0.0008247885322546398, 'learning_rate': 0.48642502080654154, 'epoch': 0.49} + 49%|████▉ | 255/520 [16:25<16:22, 3.71s/it] 49%|████▉ | 256/520 [16:29<16:16, 3.70s/it] {'loss': 1.8923, 'grad_norm': 0.0007729334456219255, 'learning_rate': 0.48362854211389095, 'epoch': 0.49} + 49%|████▉ | 256/520 [16:29<16:16, 3.70s/it] 49%|████▉ | 257/520 [16:33<16:13, 3.70s/it] {'loss': 1.9484, 'grad_norm': 0.0008136678326038764, 'learning_rate': 0.4808307568132605, 'epoch': 0.49} + 49%|████▉ | 257/520 [16:33<16:13, 3.70s/it] 50%|████▉ | 258/520 [16:36<16:09, 3.70s/it] {'loss': 1.928, 'grad_norm': 0.0007314037502407208, 'learning_rate': 0.4780317736101835, 'epoch': 0.5} + 50%|████▉ | 258/520 [16:36<16:09, 3.70s/it] 50%|████▉ | 259/520 [16:40<16:02, 3.69s/it] {'loss': 2.0269, 'grad_norm': 0.0007824223642870968, 'learning_rate': 0.4752317012567363, 'epoch': 0.5} + 50%|████▉ | 259/520 [16:40<16:02, 3.69s/it] 50%|█████ | 260/520 [16:44<16:04, 3.71s/it] {'loss': 2.2807, 'grad_norm': 0.0010868996944736022, 'learning_rate': 0.4724306485473137, 'epoch': 0.5} + 50%|█████ | 260/520 [16:44<16:04, 3.71s/it] 50%|█████ | 261/520 [16:47<16:00, 3.71s/it] {'loss': 2.2266, 'grad_norm': 0.000874614747503352, 'learning_rate': 0.4696287243144013, 'epoch': 0.5} + 50%|█████ | 261/520 [16:47<16:00, 3.71s/it] 50%|█████ | 262/520 [16:51<15:57, 3.71s/it] {'loss': 1.8123, 'grad_norm': 0.0007990906057757631, 'learning_rate': 0.46682603742434664, 'epoch': 0.5} + 50%|█████ | 262/520 [16:51<15:57, 3.71s/it] 51%|█████ | 263/520 [16:55<15:57, 3.72s/it] {'loss': 2.2414, 'grad_norm': 0.0008925288307772281, 'learning_rate': 0.46402269677312996, 'epoch': 0.51} + 51%|█████ | 263/520 [16:55<15:57, 3.72s/it] 51%|█████ | 264/520 [16:59<15:50, 3.71s/it] {'loss': 1.9687, 'grad_norm': 0.0006695159645513291, 'learning_rate': 0.4612188112821328, 'epoch': 0.51} + 51%|█████ | 264/520 [16:59<15:50, 3.71s/it] 51%|█████ | 265/520 [17:02<15:45, 3.71s/it] {'loss': 1.8874, 'grad_norm': 0.000846102977254741, 'learning_rate': 0.45841448989390604, 'epoch': 0.51} + 51%|█████ | 265/520 [17:02<15:45, 3.71s/it] 51%|█████ | 266/520 [17:06<15:39, 3.70s/it] {'loss': 1.6438, 'grad_norm': 0.0006868024780098757, 'learning_rate': 0.4556098415679368, 'epoch': 0.51} + 51%|█████ | 266/520 [17:06<15:39, 3.70s/it] 51%|█████▏ | 267/520 [17:10<15:34, 3.69s/it] {'loss': 1.8323, 'grad_norm': 0.0006700136864542949, 'learning_rate': 0.4528049752764151, 'epoch': 0.51} + 51%|█████▏ | 267/520 [17:10<15:34, 3.69s/it] 52%|█████▏ | 268/520 [17:13<15:33, 3.70s/it] {'loss': 2.3954, 'grad_norm': 0.0008992234966523818, 'learning_rate': 0.45, 'epoch': 0.52} + 52%|█████▏ | 268/520 [17:13<15:33, 3.70s/it] 52%|█████▏ | 269/520 [17:17<15:25, 3.69s/it] {'loss': 1.9411, 'grad_norm': 0.00079798718902179, 'learning_rate': 0.4471950247235849, 'epoch': 0.52} + 52%|█████▏ | 269/520 [17:17<15:25, 3.69s/it] 52%|█████▏ | 270/520 [17:21<15:22, 3.69s/it] {'loss': 2.1006, 'grad_norm': 0.0012440347762766864, 'learning_rate': 0.44439015843206325, 'epoch': 0.52} + 52%|█████▏ | 270/520 [17:21<15:22, 3.69s/it] 52%|█████▏ | 271/520 [17:24<15:20, 3.70s/it] {'loss': 
1.9692, 'grad_norm': 0.0007861180390676971, 'learning_rate': 0.4415855101060941, 'epoch': 0.52} + 52%|█████▏ | 271/520 [17:24<15:20, 3.70s/it] 52%|█████▏ | 272/520 [17:28<15:12, 3.68s/it] {'loss': 2.1649, 'grad_norm': 0.0008863989447212951, 'learning_rate': 0.4387811887178673, 'epoch': 0.52} + 52%|█████▏ | 272/520 [17:28<15:12, 3.68s/it] 52%|█████▎ | 273/520 [17:32<15:22, 3.74s/it] {'loss': 2.229, 'grad_norm': 0.0010959664690990518, 'learning_rate': 0.43597730322687017, 'epoch': 0.53} + 52%|█████▎ | 273/520 [17:32<15:22, 3.74s/it] 53%|█████▎ | 274/520 [17:36<15:16, 3.73s/it] {'loss': 1.8855, 'grad_norm': 0.0008472709544666328, 'learning_rate': 0.4331739625756535, 'epoch': 0.53} + 53%|█████▎ | 274/520 [17:36<15:16, 3.73s/it] 53%|█████▎ | 275/520 [17:39<15:07, 3.70s/it] {'loss': 1.7733, 'grad_norm': 0.0008006869080711004, 'learning_rate': 0.43037127568559885, 'epoch': 0.53} + 53%|█████▎ | 275/520 [17:39<15:07, 3.70s/it] 53%|█████▎ | 276/520 [17:43<15:01, 3.70s/it] {'loss': 1.9435, 'grad_norm': 0.0008987575097068449, 'learning_rate': 0.42756935145268626, 'epoch': 0.53} + 53%|█████▎ | 276/520 [17:43<15:01, 3.70s/it] 53%|█████▎ | 277/520 [17:47<14:57, 3.69s/it] {'loss': 2.1905, 'grad_norm': 0.0010492491699025906, 'learning_rate': 0.42476829874326366, 'epoch': 0.53} + 53%|█████▎ | 277/520 [17:47<14:57, 3.69s/it] 53%|█████▎ | 278/520 [17:50<14:53, 3.69s/it] {'loss': 1.72, 'grad_norm': 0.0007463361872595084, 'learning_rate': 0.42196822638981657, 'epoch': 0.53} + 53%|█████▎ | 278/520 [17:50<14:53, 3.69s/it] 54%|█████▎ | 279/520 [17:54<14:49, 3.69s/it] {'loss': 2.1716, 'grad_norm': 0.001062053243622948, 'learning_rate': 0.41916924318673954, 'epoch': 0.54} + 54%|█████▎ | 279/520 [17:54<14:49, 3.69s/it] 54%|█████▍ | 280/520 [17:58<14:41, 3.67s/it] {'loss': 1.792, 'grad_norm': 0.0008111151668623337, 'learning_rate': 0.4163714578861091, 'epoch': 0.54} + 54%|█████▍ | 280/520 [17:58<14:41, 3.67s/it] 54%|█████▍ | 281/520 [18:01<14:37, 3.67s/it] {'loss': 1.9859, 'grad_norm': 0.0008777468773327983, 'learning_rate': 0.4135749791934585, 'epoch': 0.54} + 54%|█████▍ | 281/520 [18:01<14:37, 3.67s/it] 54%|█████▍ | 282/520 [18:05<14:37, 3.69s/it] {'loss': 1.7351, 'grad_norm': 0.0006957191703190326, 'learning_rate': 0.41077991576355377, 'epoch': 0.54} + 54%|█████▍ | 282/520 [18:05<14:37, 3.69s/it] 54%|█████▍ | 283/520 [18:09<14:29, 3.67s/it] {'loss': 2.0215, 'grad_norm': 0.0009698893594003562, 'learning_rate': 0.4079863761961723, 'epoch': 0.54} + 54%|█████▍ | 283/520 [18:09<14:29, 3.67s/it] 55%|█████▍ | 284/520 [18:12<14:27, 3.68s/it] {'loss': 2.0928, 'grad_norm': 0.0009762036760772434, 'learning_rate': 0.40519446903188255, 'epoch': 0.55} + 55%|█████▍ | 284/520 [18:12<14:27, 3.68s/it] 55%|█████▍ | 285/520 [18:16<14:21, 3.67s/it] {'loss': 1.8274, 'grad_norm': 0.0007937083088902758, 'learning_rate': 0.40240430274782807, 'epoch': 0.55} + 55%|█████▍ | 285/520 [18:16<14:21, 3.67s/it] 55%|█████▌ | 286/520 [18:20<14:25, 3.70s/it] {'loss': 1.6755, 'grad_norm': 0.0008706313496731541, 'learning_rate': 0.3996159857535115, 'epoch': 0.55} + 55%|█████▌ | 286/520 [18:20<14:25, 3.70s/it] 55%|█████▌ | 287/520 [18:24<14:21, 3.70s/it] {'loss': 1.8776, 'grad_norm': 0.0007197182646585819, 'learning_rate': 0.39682962638658326, 'epoch': 0.55} + 55%|█████▌ | 287/520 [18:24<14:21, 3.70s/it] 55%|█████▌ | 288/520 [18:27<14:16, 3.69s/it] {'loss': 2.0613, 'grad_norm': 0.0006956329249854823, 'learning_rate': 0.3940453329086318, 'epoch': 0.55} + 55%|█████▌ | 288/520 [18:27<14:16, 3.69s/it] 56%|█████▌ | 289/520 [18:31<14:16, 3.71s/it] {'loss': 
1.8304, 'grad_norm': 0.000748811168439736, 'learning_rate': 0.3912632135009769, 'epoch': 0.56} + 56%|█████▌ | 289/520 [18:31<14:16, 3.71s/it] 56%|█████▌ | 290/520 [18:35<14:11, 3.70s/it] {'loss': 1.741, 'grad_norm': 0.0006528373145936041, 'learning_rate': 0.3884833762604671, 'epoch': 0.56} + 56%|█████▌ | 290/520 [18:35<14:11, 3.70s/it] 56%|█████▌ | 291/520 [18:38<14:03, 3.68s/it] {'loss': 1.7781, 'grad_norm': 0.0008270411426527312, 'learning_rate': 0.3857059291952791, 'epoch': 0.56} + 56%|█████▌ | 291/520 [18:38<14:03, 3.68s/it] 56%|█████▌ | 292/520 [18:42<14:01, 3.69s/it] {'loss': 1.8908, 'grad_norm': 0.0007378137693355663, 'learning_rate': 0.3829309802207215, 'epoch': 0.56} + 56%|█████▌ | 292/520 [18:42<14:01, 3.69s/it] 56%|█████▋ | 293/520 [18:46<13:56, 3.69s/it] {'loss': 1.7508, 'grad_norm': 0.0007432725870911638, 'learning_rate': 0.3801586371550418, 'epoch': 0.56} + 56%|█████▋ | 293/520 [18:46<13:56, 3.69s/it] 57%|█████▋ | 294/520 [18:49<13:55, 3.70s/it] {'loss': 1.8241, 'grad_norm': 0.0009061070364023896, 'learning_rate': 0.377389007715237, 'epoch': 0.57} + 57%|█████▋ | 294/520 [18:49<13:55, 3.70s/it] 57%|█████▋ | 295/520 [18:53<13:49, 3.69s/it] {'loss': 2.1909, 'grad_norm': 0.0009104964091867501, 'learning_rate': 0.3746221995128687, 'epoch': 0.57} + 57%|█████▋ | 295/520 [18:53<13:49, 3.69s/it] 57%|█████▋ | 296/520 [18:57<13:47, 3.70s/it] {'loss': 1.7696, 'grad_norm': 0.0008393930650288908, 'learning_rate': 0.3718583200498814, 'epoch': 0.57} + 57%|█████▋ | 296/520 [18:57<13:47, 3.70s/it] 57%|█████▋ | 297/520 [19:00<13:41, 3.68s/it] {'loss': 1.9639, 'grad_norm': 0.0008809093670169546, 'learning_rate': 0.36909747671442633, 'epoch': 0.57} + 57%|█████▋ | 297/520 [19:00<13:41, 3.68s/it] 57%|█████▋ | 298/520 [19:04<13:41, 3.70s/it] {'loss': 1.9012, 'grad_norm': 0.0007223348332845121, 'learning_rate': 0.36633977677668844, 'epoch': 0.57} + 57%|█████▋ | 298/520 [19:04<13:41, 3.70s/it] 57%|█████▊ | 299/520 [19:08<13:34, 3.69s/it] {'loss': 2.207, 'grad_norm': 0.0008758227609776777, 'learning_rate': 0.36358532738471877, 'epoch': 0.57} + 57%|█████▊ | 299/520 [19:08<13:34, 3.69s/it] 58%|█████▊ | 300/520 [19:12<13:34, 3.70s/it] {'loss': 1.9491, 'grad_norm': 0.0008871046576014641, 'learning_rate': 0.36083423556027117, 'epoch': 0.58} + 58%|█████▊ | 300/520 [19:12<13:34, 3.70s/it] 58%|█████▊ | 301/520 [19:15<13:30, 3.70s/it] {'loss': 1.911, 'grad_norm': 0.0009033291643786658, 'learning_rate': 0.35808660819464394, 'epoch': 0.58} + 58%|█████▊ | 301/520 [19:15<13:30, 3.70s/it] 58%|█████▊ | 302/520 [19:19<13:26, 3.70s/it] {'loss': 2.1849, 'grad_norm': 0.0009406710193375764, 'learning_rate': 0.35534255204452697, 'epoch': 0.58} + 58%|█████▊ | 302/520 [19:19<13:26, 3.70s/it] 58%|█████▊ | 303/520 [19:23<13:23, 3.70s/it] {'loss': 1.8308, 'grad_norm': 0.000852013559925571, 'learning_rate': 0.35260217372785374, 'epoch': 0.58} + 58%|█████▊ | 303/520 [19:23<13:23, 3.70s/it] 58%|█████▊ | 304/520 [19:26<13:19, 3.70s/it] {'loss': 2.0652, 'grad_norm': 0.0010352514850357167, 'learning_rate': 0.34986557971965854, 'epoch': 0.58} + 58%|█████▊ | 304/520 [19:26<13:19, 3.70s/it] 59%|█████▊ | 305/520 [19:30<13:13, 3.69s/it] {'loss': 2.0665, 'grad_norm': 0.0008962559366293105, 'learning_rate': 0.34713287634793977, 'epoch': 0.59} + 59%|█████▊ | 305/520 [19:30<13:13, 3.69s/it] 59%|█████▉ | 306/520 [19:34<13:11, 3.70s/it] {'loss': 1.8859, 'grad_norm': 0.0008191865018692194, 'learning_rate': 0.34440416978952826, 'epoch': 0.59} + 59%|█████▉ | 306/520 [19:34<13:11, 3.70s/it] 59%|█████▉ | 307/520 [19:37<13:09, 3.71s/it] {'loss': 
1.8092, 'grad_norm': 0.000690594666208152, 'learning_rate': 0.3416795660659623, 'epoch': 0.59} + 59%|█████▉ | 307/520 [19:37<13:09, 3.71s/it] 59%|█████▉ | 308/520 [19:41<13:06, 3.71s/it] {'loss': 1.9448, 'grad_norm': 0.0007154220686701097, 'learning_rate': 0.33895917103936785, 'epoch': 0.59} + 59%|█████▉ | 308/520 [19:41<13:06, 3.71s/it] 59%|█████▉ | 309/520 [19:46<13:48, 3.93s/it] {'loss': 1.8154, 'grad_norm': 0.0007207892112371943, 'learning_rate': 0.3362430904083461, 'epoch': 0.59} + 59%|█████▉ | 309/520 [19:46<13:48, 3.93s/it] 60%|█████▉ | 310/520 [19:49<13:28, 3.85s/it] {'loss': 1.762, 'grad_norm': 0.0007565310751849085, 'learning_rate': 0.3335314297038656, 'epoch': 0.6} + 60%|█████▉ | 310/520 [19:49<13:28, 3.85s/it] 60%|█████▉ | 311/520 [19:53<13:17, 3.81s/it] {'loss': 1.7875, 'grad_norm': 0.0007306826283764822, 'learning_rate': 0.33082429428516275, 'epoch': 0.6} + 60%|█████▉ | 311/520 [19:53<13:17, 3.81s/it] 60%|██████ | 312/520 [19:57<13:31, 3.90s/it] {'loss': 1.7151, 'grad_norm': 0.0007651008607322188, 'learning_rate': 0.3281217893356478, 'epoch': 0.6} + 60%|██████ | 312/520 [19:57<13:31, 3.90s/it] 60%|██████ | 313/520 [20:01<13:13, 3.83s/it] {'loss': 1.68, 'grad_norm': 0.0007268266563032408, 'learning_rate': 0.3254240198588178, 'epoch': 0.6} + 60%|██████ | 313/520 [20:01<13:13, 3.83s/it] 60%|██████ | 314/520 [20:05<13:31, 3.94s/it] {'loss': 1.7531, 'grad_norm': 0.0007429220303457545, 'learning_rate': 0.32273109067417766, 'epoch': 0.6} + 60%|██████ | 314/520 [20:05<13:31, 3.94s/it] 61%|██████ | 315/520 [20:09<13:27, 3.94s/it] {'loss': 2.2376, 'grad_norm': 0.0010346767165871056, 'learning_rate': 0.32004310641316663, 'epoch': 0.61} + 61%|██████ | 315/520 [20:09<13:27, 3.94s/it] 61%|██████ | 316/520 [20:13<13:47, 4.06s/it] {'loss': 1.7077, 'grad_norm': 0.0008294992951583208, 'learning_rate': 0.3173601715150931, 'epoch': 0.61} + 61%|██████ | 316/520 [20:13<13:47, 4.06s/it] 61%|██████ | 317/520 [20:17<13:24, 3.96s/it] {'loss': 1.6998, 'grad_norm': 0.0007102523803370341, 'learning_rate': 0.31468239022307715, 'epoch': 0.61} + 61%|██████ | 317/520 [20:17<13:24, 3.96s/it] 61%|██████ | 318/520 [20:21<13:08, 3.90s/it] {'loss': 1.9045, 'grad_norm': 0.0007950046475841137, 'learning_rate': 0.31200986657999963, 'epoch': 0.61} + 61%|██████ | 318/520 [20:21<13:08, 3.90s/it] 61%|██████▏ | 319/520 [20:25<13:07, 3.92s/it] {'loss': 1.7031, 'grad_norm': 0.0006714925923303909, 'learning_rate': 0.30934270442446005, 'epoch': 0.61} + 61%|██████▏ | 319/520 [20:25<13:07, 3.92s/it] 62%|██████▏ | 320/520 [20:29<13:00, 3.90s/it] {'loss': 1.6448, 'grad_norm': 0.0007594522915182032, 'learning_rate': 0.3066810073867421, 'epoch': 0.62} + 62%|██████▏ | 320/520 [20:29<13:00, 3.90s/it] 62%|██████▏ | 321/520 [20:33<13:02, 3.93s/it] {'loss': 1.9012, 'grad_norm': 0.0007129089289302338, 'learning_rate': 0.30402487888478685, 'epoch': 0.62} + 62%|██████▏ | 321/520 [20:33<13:02, 3.93s/it] 62%|██████▏ | 322/520 [20:37<13:01, 3.95s/it] {'loss': 2.002, 'grad_norm': 0.000898886090716794, 'learning_rate': 0.30137442212017496, 'epoch': 0.62} + 62%|██████▏ | 322/520 [20:37<13:01, 3.95s/it] 62%|██████▏ | 323/520 [20:41<13:12, 4.02s/it] {'loss': 2.1378, 'grad_norm': 0.0010174027137503656, 'learning_rate': 0.29872974007411623, 'epoch': 0.62} + 62%|██████▏ | 323/520 [20:41<13:12, 4.02s/it] 62%|██████▏ | 324/520 [20:45<12:59, 3.98s/it] {'loss': 1.8055, 'grad_norm': 0.0007294923191228898, 'learning_rate': 0.2960909355034491, 'epoch': 0.62} + 62%|██████▏ | 324/520 [20:45<12:59, 3.98s/it] 62%|██████▎ | 325/520 [20:48<12:42, 3.91s/it] 
{'loss': 1.8275, 'grad_norm': 0.0008634754046577108, 'learning_rate': 0.2934581109366477, 'epoch': 0.62} + 62%|██████▎ | 325/520 [20:48<12:42, 3.91s/it] 63%|██████▎ | 326/520 [20:52<12:28, 3.86s/it] {'loss': 1.7928, 'grad_norm': 0.0008064639484111314, 'learning_rate': 0.2908313686698384, 'epoch': 0.63} + 63%|██████▎ | 326/520 [20:52<12:28, 3.86s/it] 63%|██████▎ | 327/520 [20:56<12:31, 3.89s/it] {'loss': 2.223, 'grad_norm': 0.0011105035092488274, 'learning_rate': 0.2882108107628246, 'epoch': 0.63} + 63%|██████▎ | 327/520 [20:56<12:31, 3.89s/it] 63%|██████▎ | 328/520 [21:00<12:29, 3.90s/it] {'loss': 1.9172, 'grad_norm': 0.0007490213278890572, 'learning_rate': 0.2855965390351222, 'epoch': 0.63} + 63%|██████▎ | 328/520 [21:00<12:29, 3.90s/it] 63%|██████▎ | 329/520 [21:04<12:28, 3.92s/it] {'loss': 1.7035, 'grad_norm': 0.0006788074837676128, 'learning_rate': 0.28298865506200294, 'epoch': 0.63} + 63%|██████▎ | 329/520 [21:04<12:28, 3.92s/it] 63%|██████▎ | 330/520 [21:08<12:22, 3.91s/it] {'loss': 1.7904, 'grad_norm': 0.0007301158077192767, 'learning_rate': 0.28038726017054766, 'epoch': 0.63} + 63%|██████▎ | 330/520 [21:08<12:22, 3.91s/it] 64%|██████▎ | 331/520 [21:12<12:11, 3.87s/it] {'loss': 1.7746, 'grad_norm': 0.0008105080185335865, 'learning_rate': 0.27779245543570963, 'epoch': 0.64} + 64%|██████▎ | 331/520 [21:12<12:11, 3.87s/it] 64%|██████▍ | 332/520 [21:15<12:02, 3.84s/it] {'loss': 2.1687, 'grad_norm': 0.00084702870908872, 'learning_rate': 0.2752043416763874, 'epoch': 0.64} + 64%|██████▍ | 332/520 [21:15<12:02, 3.84s/it] 64%|██████▍ | 333/520 [21:19<11:50, 3.80s/it] {'loss': 1.993, 'grad_norm': 0.0009303550964880391, 'learning_rate': 0.27262301945150735, 'epoch': 0.64} + 64%|██████▍ | 333/520 [21:19<11:50, 3.80s/it] 64%|██████▍ | 334/520 [21:23<11:42, 3.77s/it] {'loss': 1.8067, 'grad_norm': 0.0007626639339659826, 'learning_rate': 0.2700485890561167, 'epoch': 0.64} + 64%|██████▍ | 334/520 [21:23<11:42, 3.77s/it] 64%|██████▍ | 335/520 [21:26<11:31, 3.74s/it] {'loss': 1.8339, 'grad_norm': 0.0007934976710895557, 'learning_rate': 0.26748115051748633, 'epoch': 0.64} + 64%|██████▍ | 335/520 [21:26<11:31, 3.74s/it] 65%|██████▍ | 336/520 [21:30<11:30, 3.75s/it] {'loss': 1.7515, 'grad_norm': 0.0007787832455402607, 'learning_rate': 0.2649208035912249, 'epoch': 0.65} + 65%|██████▍ | 336/520 [21:30<11:30, 3.75s/it] 65%|██████▍ | 337/520 [21:34<11:26, 3.75s/it] {'loss': 1.6826, 'grad_norm': 0.0008348941985473136, 'learning_rate': 0.2623676477574025, 'epoch': 0.65} + 65%|██████▍ | 337/520 [21:34<11:26, 3.75s/it] 65%|██████▌ | 338/520 [21:38<11:26, 3.77s/it] {'loss': 1.8456, 'grad_norm': 0.000753926785241794, 'learning_rate': 0.25982178221668534, 'epoch': 0.65} + 65%|██████▌ | 338/520 [21:38<11:26, 3.77s/it] 65%|██████▌ | 339/520 [21:42<11:20, 3.76s/it] {'loss': 1.7688, 'grad_norm': 0.0008883172788757546, 'learning_rate': 0.25728330588648174, 'epoch': 0.65} + 65%|██████▌ | 339/520 [21:42<11:20, 3.76s/it] 65%|██████▌ | 340/520 [21:45<11:21, 3.79s/it] {'loss': 1.7451, 'grad_norm': 0.0008263610996992473, 'learning_rate': 0.25475231739709886, 'epoch': 0.65} + 65%|██████▌ | 340/520 [21:45<11:21, 3.79s/it] 66%|██████▌ | 341/520 [21:49<11:13, 3.76s/it] {'loss': 1.8089, 'grad_norm': 0.0007924549283410557, 'learning_rate': 0.25222891508790973, 'epoch': 0.66} + 66%|██████▌ | 341/520 [21:49<11:13, 3.76s/it] 66%|██████▌ | 342/520 [21:53<11:10, 3.77s/it] {'loss': 2.2362, 'grad_norm': 0.0010397241233310566, 'learning_rate': 0.24971319700353342, 'epoch': 0.66} + 66%|██████▌ | 342/520 [21:53<11:10, 3.77s/it] 66%|██████▌ | 
343/520 [21:57<11:05, 3.76s/it] {'loss': 2.12, 'grad_norm': 0.0008779337060457889, 'learning_rate': 0.24720526089002456, 'epoch': 0.66} + 66%|██████▌ | 343/520 [21:57<11:05, 3.76s/it] 66%|██████▌ | 344/520 [22:00<10:59, 3.74s/it] {'loss': 1.6727, 'grad_norm': 0.0007062179657180754, 'learning_rate': 0.24470520419107664, 'epoch': 0.66} + 66%|██████▌ | 344/520 [22:00<10:59, 3.74s/it] 66%|██████▋ | 345/520 [22:04<10:51, 3.73s/it] {'loss': 1.8626, 'grad_norm': 0.0008480858800150577, 'learning_rate': 0.24221312404423484, 'epoch': 0.66} + 66%|██████▋ | 345/520 [22:04<10:51, 3.73s/it] 67%|██████▋ | 346/520 [22:08<10:47, 3.72s/it] {'loss': 2.0719, 'grad_norm': 0.0008659981571265746, 'learning_rate': 0.2397291172771221, 'epoch': 0.67} + 67%|██████▋ | 346/520 [22:08<10:47, 3.72s/it] 67%|██████▋ | 347/520 [22:11<10:41, 3.71s/it] {'loss': 1.6799, 'grad_norm': 0.0006987145101252961, 'learning_rate': 0.2372532804036779, 'epoch': 0.67} + 67%|██████▋ | 347/520 [22:11<10:41, 3.71s/it]Token indices sequence length is longer than the specified maximum sequence length for this model (2778 > 2048). Running this sequence through the model will result in indexing errors + 67%|██████▋ | 348/520 [22:15<10:39, 3.72s/it] {'loss': 1.7725, 'grad_norm': 0.0009613402321048028, 'learning_rate': 0.23478570962040696, 'epoch': 0.67} + 67%|██████▋ | 348/520 [22:15<10:39, 3.72s/it] 67%|██████▋ | 349/520 [22:19<10:31, 3.69s/it] {'loss': 1.7907, 'grad_norm': 0.0008669704034469008, 'learning_rate': 0.23232650080264208, 'epoch': 0.67} + 67%|██████▋ | 349/520 [22:19<10:31, 3.69s/it] 67%|██████▋ | 350/520 [22:22<10:27, 3.69s/it] {'loss': 1.7919, 'grad_norm': 0.0007510551304573001, 'learning_rate': 0.22987574950082, 'epoch': 0.67} + 67%|██████▋ | 350/520 [22:22<10:27, 3.69s/it] 68%|██████▊ | 351/520 [22:26<10:26, 3.71s/it] {'loss': 1.6026, 'grad_norm': 0.0008137839210239743, 'learning_rate': 0.22743355093676668, 'epoch': 0.68} + 68%|██████▊ | 351/520 [22:26<10:26, 3.71s/it] 68%|██████▊ | 352/520 [22:30<10:21, 3.70s/it] {'loss': 1.8202, 'grad_norm': 0.0007277278451389297, 'learning_rate': 0.22500000000000012, 'epoch': 0.68} + 68%|██████▊ | 352/520 [22:30<10:21, 3.70s/it] 68%|██████▊ | 353/520 [22:34<10:20, 3.72s/it] {'loss': 2.0469, 'grad_norm': 0.0010576085646929886, 'learning_rate': 0.22257519124404132, 'epoch': 0.68} + 68%|██████▊ | 353/520 [22:34<10:20, 3.72s/it] 68%|██████▊ | 354/520 [22:37<10:16, 3.71s/it] {'loss': 2.1646, 'grad_norm': 0.0008181984492437585, 'learning_rate': 0.2201592188827416, 'epoch': 0.68} + 68%|██████▊ | 354/520 [22:37<10:16, 3.71s/it] 68%|██████▊ | 355/520 [22:41<10:11, 3.71s/it] {'loss': 1.739, 'grad_norm': 0.0008149183265052543, 'learning_rate': 0.21775217678662198, 'epoch': 0.68} + 68%|██████▊ | 355/520 [22:41<10:11, 3.71s/it] 68%|██████▊ | 356/520 [22:45<10:10, 3.72s/it] {'loss': 1.7596, 'grad_norm': 0.000821030304408377, 'learning_rate': 0.2153541584792259, 'epoch': 0.68} + 68%|██████▊ | 356/520 [22:45<10:10, 3.72s/it] 69%|██████▊ | 357/520 [22:49<10:09, 3.74s/it] {'loss': 1.7068, 'grad_norm': 0.0007014877570559729, 'learning_rate': 0.21296525713348466, 'epoch': 0.69} + 69%|██████▊ | 357/520 [22:49<10:09, 3.74s/it] 69%|██████▉ | 358/520 [22:53<10:23, 3.85s/it] {'loss': 1.6969, 'grad_norm': 0.00077172327946405, 'learning_rate': 0.2105855655680986, 'epoch': 0.69} + 69%|██████▉ | 358/520 [22:53<10:23, 3.85s/it] 69%|██████▉ | 359/520 [22:57<10:38, 3.97s/it] {'loss': 2.1119, 'grad_norm': 0.0010596375794226787, 'learning_rate': 0.2082151762439292, 'epoch': 0.69} + 69%|██████▉ | 359/520 [22:57<10:38, 
3.97s/it] 69%|██████▉ | 360/520 [23:01<10:44, 4.03s/it] {'loss': 2.0905, 'grad_norm': 0.0011955559827526432, 'learning_rate': 0.2058541812604083, 'epoch': 0.69} + 69%|██████▉ | 360/520 [23:01<10:44, 4.03s/it] 69%|██████▉ | 361/520 [23:05<10:28, 3.95s/it] {'loss': 2.1088, 'grad_norm': 0.0010497728472541265, 'learning_rate': 0.20350267235195796, 'epoch': 0.69} + 69%|██████▉ | 361/520 [23:05<10:28, 3.95s/it] 70%|██████▉ | 362/520 [23:09<10:12, 3.87s/it] {'loss': 1.6708, 'grad_norm': 0.0008430497950677495, 'learning_rate': 0.20116074088442726, 'epoch': 0.7} + 70%|██████▉ | 362/520 [23:09<10:12, 3.87s/it] 70%|██████▉ | 363/520 [23:12<10:08, 3.88s/it] {'loss': 1.8581, 'grad_norm': 0.0007726483850890169, 'learning_rate': 0.19882847785154228, 'epoch': 0.7} + 70%|██████▉ | 363/520 [23:12<10:08, 3.88s/it] 70%|███████ | 364/520 [23:16<10:10, 3.91s/it] {'loss': 2.1264, 'grad_norm': 0.00101187710982609, 'learning_rate': 0.1965059738713701, 'epoch': 0.7} + 70%|███████ | 364/520 [23:16<10:10, 3.91s/it] 70%|███████ | 365/520 [23:20<09:56, 3.85s/it] {'loss': 1.8832, 'grad_norm': 0.0007896632762242964, 'learning_rate': 0.1941933191827985, 'epoch': 0.7} + 70%|███████ | 365/520 [23:20<09:56, 3.85s/it] 70%|███████ | 366/520 [23:24<09:47, 3.81s/it] {'loss': 1.8058, 'grad_norm': 0.0007354210520660263, 'learning_rate': 0.1918906036420294, 'epoch': 0.7} + 70%|███████ | 366/520 [23:24<09:47, 3.81s/it] 71%|███████ | 367/520 [23:28<09:37, 3.77s/it] {'loss': 1.8145, 'grad_norm': 0.0008424958652593204, 'learning_rate': 0.18959791671908743, 'epoch': 0.71} + 71%|███████ | 367/520 [23:28<09:37, 3.77s/it] 71%|███████ | 368/520 [23:31<09:32, 3.77s/it] {'loss': 1.6471, 'grad_norm': 0.0008400526137325087, 'learning_rate': 0.18731534749434467, 'epoch': 0.71} + 71%|███████ | 368/520 [23:31<09:32, 3.77s/it] 71%|███████ | 369/520 [23:35<09:25, 3.74s/it] {'loss': 2.0547, 'grad_norm': 0.0007950600478846705, 'learning_rate': 0.18504298465505792, 'epoch': 0.71} + 71%|███████ | 369/520 [23:35<09:25, 3.74s/it] 71%|███████ | 370/520 [23:39<09:19, 3.73s/it] {'loss': 1.7308, 'grad_norm': 0.0008832461411133784, 'learning_rate': 0.18278091649192435, 'epoch': 0.71} + 71%|███████ | 370/520 [23:39<09:19, 3.73s/it] 71%|███████▏ | 371/520 [23:42<09:15, 3.73s/it] {'loss': 1.7197, 'grad_norm': 0.000868244795509102, 'learning_rate': 0.18052923089564987, 'epoch': 0.71} + 71%|███████▏ | 371/520 [23:42<09:15, 3.73s/it] 72%|███████▏ | 372/520 [23:46<09:11, 3.73s/it] {'loss': 2.1623, 'grad_norm': 0.0010140923828558857, 'learning_rate': 0.17828801535353508, 'epoch': 0.72} + 72%|███████▏ | 372/520 [23:46<09:11, 3.73s/it] 72%|███████▏ | 373/520 [23:50<09:11, 3.75s/it] {'loss': 2.0622, 'grad_norm': 0.0011118909037103955, 'learning_rate': 0.17605735694607572, 'epoch': 0.72} + 72%|███████▏ | 373/520 [23:50<09:11, 3.75s/it] 72%|███████▏ | 374/520 [23:54<09:13, 3.79s/it] {'loss': 1.7955, 'grad_norm': 0.0008717051262730681, 'learning_rate': 0.17383734234357875, 'epoch': 0.72} + 72%|███████▏ | 374/520 [23:54<09:13, 3.79s/it] 72%|███████▏ | 375/520 [23:58<09:13, 3.82s/it] {'loss': 1.6805, 'grad_norm': 0.000967448899445843, 'learning_rate': 0.17162805780279533, 'epoch': 0.72} + 72%|███████▏ | 375/520 [23:58<09:13, 3.82s/it] 72%|███████▏ | 376/520 [24:02<09:11, 3.83s/it] {'loss': 1.8124, 'grad_norm': 0.0007777227341996313, 'learning_rate': 0.16942958916356993, 'epoch': 0.72} + 72%|███████▏ | 376/520 [24:02<09:11, 3.83s/it] 72%|███████▎ | 377/520 [24:05<09:08, 3.84s/it] {'loss': 1.7733, 'grad_norm': 0.0008592412118474397, 'learning_rate': 0.1672420218455037, 'epoch': 
0.72}
+ 73%|███████▎ | 378/520 [24:09<09:04, 3.84s/it] {'loss': 1.8443, 'grad_norm': 0.0007282483236074305, 'learning_rate': 0.16506544084463715, 'epoch': 0.73}
+ 73%|███████▎ | 379/520 [24:13<09:02, 3.85s/it] {'loss': 1.8388, 'grad_norm': 0.0007488135382364372, 'learning_rate': 0.1628999307301462, 'epoch': 0.73}
+ 73%|███████▎ | 380/520 [24:17<08:58, 3.85s/it] {'loss': 2.1749, 'grad_norm': 0.0009755508866617741, 'learning_rate': 0.1607455756410573, 'epoch': 0.73}
+ 73%|███████▎ | 381/520 [24:21<08:54, 3.85s/it] {'loss': 1.8076, 'grad_norm': 0.0007152665606196855, 'learning_rate': 0.15860245928297836, 'epoch': 0.73}
+ 73%|███████▎ | 382/520 [24:25<08:49, 3.84s/it] {'loss': 2.075, 'grad_norm': 0.0008973633994628863, 'learning_rate': 0.15647066492484563, 'epoch': 0.73}
+ 74%|███████▎ | 383/520 [24:28<08:39, 3.79s/it] {'loss': 1.6241, 'grad_norm': 0.0010154837797889684, 'learning_rate': 0.15435027539568885, 'epoch': 0.74}
+ 74%|███████▍ | 384/520 [24:32<08:33, 3.78s/it] {'loss': 2.4286, 'grad_norm': 0.0011149136094282027, 'learning_rate': 0.1522413730814134, 'epoch': 0.74}
+ 74%|███████▍ | 385/520 [24:36<08:26, 3.75s/it] {'loss': 1.7846, 'grad_norm': 0.0007698714009402183, 'learning_rate': 0.15014403992159825, 'epoch': 0.74}
+ 74%|███████▍ | 386/520 [24:39<08:20, 3.73s/it] {'loss': 1.6647, 'grad_norm': 0.00071764132241911, 'learning_rate': 0.14805835740631354, 'epoch': 0.74}
+ 74%|███████▍ | 387/520 [24:43<08:17, 3.74s/it] {'loss': 2.2529, 'grad_norm': 0.0008971881575670362, 'learning_rate': 0.1459844065729529, 'epoch': 0.74}
+ 75%|███████▍ | 388/520 [24:47<08:11, 3.73s/it] {'loss': 1.6568, 'grad_norm': 0.0007458636596929201, 'learning_rate': 0.1439222680030862, 'epoch': 0.75}
+ 75%|███████▍ | 389/520 [24:51<08:06, 3.71s/it] {'loss': 1.801, 'grad_norm': 0.0008937029250544288, 'learning_rate': 0.14187202181932793, 'epoch': 0.75}
+ 75%|███████▌ | 390/520 [24:54<08:02, 3.71s/it] {'loss': 1.7948, 'grad_norm': 0.0007795772603163828, 'learning_rate': 0.13983374768222384, 'epoch': 0.75}
+ 75%|███████▌ | 391/520 [24:58<07:57, 3.70s/it] {'loss': 1.9122, 'grad_norm': 0.0007090075320060641, 'learning_rate': 0.13780752478715627, 'epoch': 0.75}
+ 75%|███████▌ | 392/520 [25:02<07:59, 3.74s/it] {'loss': 1.6857, 'grad_norm': 0.0007209725195726478, 'learning_rate': 0.13579343186126727, 'epoch': 0.75}
+ 76%|███████▌ | 393/520 [25:06<07:52, 3.72s/it] {'loss': 1.9339, 'grad_norm': 0.0008737885981894962, 'learning_rate': 0.1337915471603989, 'epoch': 0.76}
+ 76%|███████▌ | 394/520 [25:09<07:45, 3.69s/it] {'loss': 1.7721, 'grad_norm': 0.0008785921901444193, 'learning_rate': 0.13180194846605364, 'epoch': 0.76}
+ 76%|███████▌ | 395/520 [25:13<07:42, 3.70s/it] {'loss': 1.7265, 'grad_norm': 0.0007962953801884791, 'learning_rate': 0.12982471308237153, 'epoch': 0.76}
+ 76%|███████▌ | 396/520 [25:17<07:41, 3.72s/it] {'loss': 1.8135, 'grad_norm': 0.0010069931077465615, 'learning_rate': 0.1278599178331267, 'epoch': 0.76}
+ 76%|███████▋ | 397/520 [25:20<07:40, 3.75s/it] {'loss': 1.8169, 'grad_norm': 0.0008537356727055997, 'learning_rate': 0.12590763905874314, 'epoch': 0.76}
+ 77%|███████▋ | 398/520 [25:24<07:42, 3.79s/it] {'loss': 1.7727, 'grad_norm': 0.0007942470309875772, 'learning_rate': 0.12396795261332731, 'epoch': 0.77}
+ 77%|███████▋ | 399/520 [25:28<07:39, 3.80s/it] {'loss': 2.0028, 'grad_norm': 0.001195708803851033, 'learning_rate': 0.12204093386172225, 'epoch': 0.77}
+ 77%|███████▋ | 400/520 [25:32<07:38, 3.82s/it] {'loss': 2.0224, 'grad_norm': 0.0009257094972654131, 'learning_rate': 0.12012665767657825, 'epoch': 0.77}
+ 77%|███████▋ | 401/520 [25:36<07:35, 3.83s/it] {'loss': 1.568, 'grad_norm': 0.0008924773110179011, 'learning_rate': 0.11822519843544421, 'epoch': 0.77}
+ 77%|███████▋ | 402/520 [25:40<07:33, 3.84s/it] {'loss': 1.6591, 'grad_norm': 0.0008141839401535407, 'learning_rate': 0.11633663001787797, 'epoch': 0.77}
+ 78%|███████▊ | 403/520 [25:43<07:26, 3.81s/it] {'loss': 1.7555, 'grad_norm': 0.0008301932320504838, 'learning_rate': 0.11446102580257549, 'epoch': 0.78}
+ 78%|███████▊ | 404/520 [25:47<07:18, 3.78s/it] {'loss': 1.6655, 'grad_norm': 0.0009943937729000854, 'learning_rate': 0.11259845866451956, 'epoch': 0.78}
+ 78%|███████▊ | 405/520 [25:51<07:18, 3.81s/it] {'loss': 1.9795, 'grad_norm': 0.0007851936689852431, 'learning_rate': 0.11074900097214908, 'epoch': 0.78}
+ 78%|███████▊ | 406/520 [25:55<07:19, 3.85s/it] {'loss': 1.9583, 'grad_norm': 0.0010514865933235922, 'learning_rate': 0.1089127245845461, 'epoch': 0.78}
+ 78%|███████▊ | 407/520 [25:59<07:15, 3.85s/it] {'loss': 1.9158, 'grad_norm': 0.0008339665976523141, 'learning_rate': 0.10708970084864515, 'epoch': 0.78}
+ 78%|███████▊ | 408/520 [26:03<07:11, 3.85s/it] {'loss': 1.7577, 'grad_norm': 0.0008759330670809398, 'learning_rate': 0.10528000059645995, 'epoch': 0.78}
+ 79%|███████▊ | 409/520 [26:06<07:04, 3.83s/it] {'loss': 1.9398, 'grad_norm': 0.0009488517830687872, 'learning_rate': 0.10348369414233174, 'epoch': 0.79}
+ 79%|███████▉ | 410/520 [26:10<06:59, 3.81s/it] {'loss': 1.5646, 'grad_norm': 0.000873508632185158, 'learning_rate': 0.10170085128019768, 'epoch': 0.79}
+ 79%|███████▉ | 411/520 [26:14<06:54, 3.80s/it] {'loss': 1.8639, 'grad_norm': 0.0008358280753972331, 'learning_rate': 0.09993154128087836, 'epoch': 0.79}
+ 79%|███████▉ | 412/520 [26:18<06:50, 3.80s/it] {'loss': 1.7705, 'grad_norm': 0.0007695916578093165, 'learning_rate': 0.0981758328893866, 'epoch': 0.79}
+ 79%|███████▉ | 413/520 [26:22<06:45, 3.79s/it] {'loss': 2.1555, 'grad_norm': 0.0008505594887181589, 'learning_rate': 0.09643379432225693, 'epoch': 0.79}
+ 80%|███████▉ | 414/520 [26:25<06:39, 3.77s/it] {'loss': 1.795, 'grad_norm': 0.0013067037788996665, 'learning_rate': 0.09470549326489411, 'epoch': 0.8}
+ 80%|███████▉ | 415/520 [26:29<06:33, 3.75s/it] {'loss': 1.6809, 'grad_norm': 0.0006773600846829742, 'learning_rate': 0.09299099686894423, 'epoch': 0.8}
+ 80%|████████ | 416/520 [26:33<06:28, 3.74s/it] {'loss': 1.6307, 'grad_norm': 0.0009162797582556436, 'learning_rate': 0.09129037174968503, 'epoch': 0.8}
+ 80%|████████ | 417/520 [26:36<06:25, 3.74s/it] {'loss': 1.8194, 'grad_norm': 0.0007427837480041831, 'learning_rate': 0.08960368398343747, 'epoch': 0.8}
+ 80%|████████ | 418/520 [26:40<06:22, 3.75s/it] {'loss': 1.7929, 'grad_norm': 0.0006869084067531414, 'learning_rate': 0.08793099910499924, 'epoch': 0.8}
+ 81%|████████ | 419/520 [26:44<06:18, 3.75s/it] {'loss': 1.7989, 'grad_norm': 0.0008500057326498656, 'learning_rate': 0.08627238210509765, 'epoch': 0.81}
+ 81%|████████ | 420/520 [26:48<06:13, 3.74s/it] {'loss': 1.6681, 'grad_norm': 0.00082096372115528, 'learning_rate': 0.08462789742786457, 'epoch': 0.81}
+ 81%|████████ | 421/520 [26:51<06:10, 3.74s/it] {'loss': 1.5693, 'grad_norm': 0.0010297796368286957, 'learning_rate': 0.08299760896833293, 'epoch': 0.81}
+ 81%|████████ | 422/520 [26:55<06:05, 3.73s/it] {'loss': 1.7287, 'grad_norm': 0.0008185686636918476, 'learning_rate': 0.08138158006995365, 'epoch': 0.81}
+ 81%|████████▏ | 423/520 [26:59<06:01, 3.73s/it] {'loss': 1.7519, 'grad_norm': 0.0009212816912886345, 'learning_rate': 0.07977987352213499, 'epoch': 0.81}
+ 82%|████████▏ | 424/520 [27:03<05:58, 3.73s/it] {'loss': 2.1404, 'grad_norm': 0.001095811652656639, 'learning_rate': 0.0781925515578024, 'epoch': 0.82}
+ 82%|████████▏ | 425/520 [27:06<05:57, 3.76s/it] {'loss': 1.6729, 'grad_norm': 0.0007605442062788635, 'learning_rate': 0.07661967585098063, 'epoch': 0.82}
+ 82%|████████▏ | 426/520 [27:10<05:52, 3.75s/it] {'loss': 1.8787, 'grad_norm': 0.0010992543549534707, 'learning_rate': 0.07506130751439803, 'epoch': 0.82}
+ 82%|████████▏ | 427/520 [27:14<05:49, 3.76s/it] {'loss': 1.6042, 'grad_norm': 0.0009076484992818567, 'learning_rate': 0.07351750709711112, 'epoch': 0.82}
+ 82%|████████▏ | 428/520 [27:18<05:44, 3.75s/it] {'loss': 1.5563, 'grad_norm': 0.0010872720530359856, 'learning_rate': 0.07198833458215287, 'epoch': 0.82}
+ 82%|████████▎ | 429/520 [27:21<05:41, 3.76s/it] {'loss': 1.7477, 'grad_norm': 0.0007687363151981653, 'learning_rate': 0.07047384938420152, 'epoch': 0.82}
+Token indices sequence length is longer than the specified maximum sequence length for this model (2076 > 2048). Running this sequence through the model will result in indexing errors
+ 83%|████████▎ | 430/520 [27:25<05:42, 3.80s/it] {'loss': 1.6873, 'grad_norm': 0.000787521926559066, 'learning_rate': 0.06897411034727215, 'epoch': 0.83}
+ 83%|████████▎ | 431/520 [27:29<05:41, 3.83s/it] {'loss': 2.0519, 'grad_norm': 0.00116607655930434, 'learning_rate': 0.06748917574243089, 'epoch': 0.83}
+ 83%|████████▎ | 432/520 [27:33<05:37, 3.84s/it] {'loss': 1.6073, 'grad_norm': 0.0007899496017809561, 'learning_rate': 0.06601910326552998, 'epoch': 0.83}
+ 83%|████████▎ | 433/520 [27:37<05:31, 3.81s/it] {'loss': 1.788, 'grad_norm': 0.0008015374520681987, 'learning_rate': 0.0645639500349669, 'epoch': 0.83}
+ 83%|████████▎ | 434/520 [27:41<05:26, 3.79s/it] {'loss': 1.5127, 'grad_norm': 0.0010845748766984094, 'learning_rate': 0.06312377258946437, 'epoch': 0.83}
+ 84%|████████▎ | 435/520 [27:44<05:20, 3.77s/it] {'loss': 1.8929, 'grad_norm': 0.0009505742124979793, 'learning_rate': 0.06169862688587413, 'epoch': 0.84}
+ 84%|████████▍ | 436/520 [27:48<05:15, 3.76s/it] {'loss': 1.6108, 'grad_norm': 0.0010082572684909222, 'learning_rate': 0.06028856829700258, 'epoch': 0.84}
+ 84%|████████▍ | 437/520 [27:52<05:12, 3.76s/it] {'loss': 1.8931, 'grad_norm': 0.0007779055360649574, 'learning_rate': 0.05889365160945912, 'epoch': 0.84}
+ 84%|████████▍ | 438/520 [27:56<05:10, 3.78s/it] {'loss': 1.5921, 'grad_norm': 0.0007603469747836081, 'learning_rate': 0.0575139310215276, 'epoch': 0.84}
+ 84%|████████▍ | 439/520 [27:59<05:04, 3.77s/it] {'loss': 1.9463, 'grad_norm': 0.000746102438580814, 'learning_rate': 0.05614946014106084, 'epoch': 0.84}
+ 85%|████████▍ | 440/520 [28:03<05:01, 3.77s/it] {'loss': 1.7235, 'grad_norm': 0.0008578207865772747, 'learning_rate': 0.05480029198339711, 'epoch': 0.85}
+ 85%|████████▍ | 441/520 [28:07<05:00, 3.80s/it] {'loss': 2.0063, 'grad_norm': 0.0009244400310054208, 'learning_rate': 0.05346647896930092, 'epoch': 0.85}
+ 85%|████████▌ | 442/520 [28:11<04:53, 3.77s/it] {'loss': 1.7689, 'grad_norm': 0.0008850809635303914, 'learning_rate': 0.052148072922925656, 'epoch': 0.85}
+ 85%|████████▌ | 443/520 [28:14<04:49, 3.76s/it] {'loss': 1.7691, 'grad_norm': 0.0007415055909569241, 'learning_rate': 0.05084512506980023, 'epoch': 0.85}
+ 85%|████████▌ | 444/520 [28:18<04:44, 3.74s/it] {'loss': 1.7108, 'grad_norm': 0.000743465546713971, 'learning_rate': 0.049557686034839156, 'epoch': 0.85}
+ 86%|████████▌ | 445/520 [28:22<04:40, 3.74s/it] {'loss': 1.6334, 'grad_norm': 0.0007913327591470625, 'learning_rate': 0.04828580584037491, 'epoch': 0.86}
+ 86%|████████▌ | 446/520 [28:26<04:36, 3.74s/it] {'loss': 2.0728, 'grad_norm': 0.0008335067941861961, 'learning_rate': 0.047029533904214584, 'epoch': 0.86}
+ 86%|████████▌ | 447/520 [28:29<04:34, 3.76s/it] {'loss': 1.7574, 'grad_norm': 0.0008010510002907568, 'learning_rate': 0.045788919037720185, 'epoch': 0.86}
+ 86%|████████▌ | 448/520 [28:33<04:29, 3.74s/it] {'loss': 1.7082, 'grad_norm': 0.0007866134863428526, 'learning_rate': 0.04456400944391144, 'epoch': 0.86}
+ 86%|████████▋ | 449/520 [28:37<04:25, 3.74s/it] {'loss': 2.06, 'grad_norm': 0.0008834588341808236, 'learning_rate': 0.043354852715593584, 'epoch': 0.86}
+ 87%|████████▋ | 450/520 [28:41<04:20, 3.72s/it] {'loss': 1.839, 'grad_norm': 0.0009612003810441723, 'learning_rate': 0.04216149583350753, 'epoch': 0.87}
+ 87%|████████▋ | 451/520 [28:44<04:17, 3.74s/it] {'loss': 1.8011, 'grad_norm': 0.0009570768171097865, 'learning_rate': 0.04098398516450508, 'epoch': 0.87}
+ 87%|████████▋ | 452/520 [28:48<04:13, 3.72s/it] {'loss': 2.1209, 'grad_norm': 0.0008093328714672427, 'learning_rate': 0.03982236645974709, 'epoch': 0.87}
+ 87%|████████▋ | 453/520 [28:52<04:10, 3.74s/it] {'loss': 2.0603, 'grad_norm': 0.0008919925116356625, 'learning_rate': 0.03867668485292565, 'epoch': 0.87}
+ 87%|████████▋ | 454/520 [28:56<04:06, 3.73s/it] {'loss': 1.6555, 'grad_norm': 0.000892426429782506, 'learning_rate': 0.03754698485851071, 'epoch': 0.87}
+ 88%|████████▊ | 455/520 [28:59<04:02, 3.73s/it] {'loss': 1.8235, 'grad_norm': 0.0009578076995380698, 'learning_rate': 0.036433310370020705, 'epoch': 0.88}
+ 88%|████████▊ | 456/520 [29:03<03:59, 3.74s/it] {'loss': 1.7284, 'grad_norm': 0.0009003961685876322, 'learning_rate': 0.03533570465831652, 'epoch': 0.88}
+ 88%|████████▊ | 457/520 [29:07<03:55, 3.74s/it] {'loss': 2.2629, 'grad_norm': 0.0009659123814345111, 'learning_rate': 0.03425421036992097, 'epoch': 0.88}
+ 88%|████████▊ | 458/520 [29:11<03:51, 3.74s/it] {'loss': 1.9606, 'grad_norm': 0.0009641108567758357, 'learning_rate': 0.03318886952536111, 'epoch': 0.88}
+ 88%|████████▊ | 459/520 [29:14<03:47, 3.74s/it] {'loss': 1.7701, 'grad_norm': 0.0008511392179726062, 'learning_rate': 0.032139723517535905, 'epoch': 0.88}
+ 88%|████████▊ | 460/520 [29:18<03:45, 3.75s/it] {'loss': 1.6182, 'grad_norm': 0.0008074588428909012, 'learning_rate': 0.03110681311010814, 'epoch': 0.88}
+ 89%|████████▊ | 461/520 [29:22<03:40, 3.74s/it] {'loss': 2.3022, 'grad_norm': 0.0009208375415471107, 'learning_rate': 0.030090178435920073, 'epoch': 0.89}
+ 89%|████████▉ | 462/520 [29:26<03:38, 3.76s/it] {'loss': 2.167, 'grad_norm': 0.0008732036499360188, 'learning_rate': 0.029089858995434703, 'epoch': 0.89}
+ 89%|████████▉ | 463/520 [29:29<03:36, 3.80s/it] {'loss': 1.6477, 'grad_norm': 0.0008558239552125389, 'learning_rate': 0.02810589365520041, 'epoch': 0.89}
+ 89%|████████▉ | 464/520 [29:33<03:32, 3.80s/it] {'loss': 1.8391, 'grad_norm': 0.0008550850052451567, 'learning_rate': 0.02713832064634126, 'epoch': 0.89}
+ 89%|████████▉ | 465/520 [29:37<03:27, 3.77s/it] {'loss': 1.9845, 'grad_norm': 0.0008326417754636943, 'learning_rate': 0.02618717756307144, 'epoch': 0.89}
+ 90%|████████▉ | 466/520 [29:41<03:22, 3.74s/it] {'loss': 1.7785, 'grad_norm': 0.0007290906042556751, 'learning_rate': 0.02525250136123459, 'epoch': 0.9}
+ 90%|████████▉ | 467/520 [29:44<03:19, 3.77s/it] {'loss': 1.9732, 'grad_norm': 0.000833389825885184, 'learning_rate': 0.02433432835686779, 'epoch': 0.9}
+ 90%|█████████ | 468/520 [29:48<03:14, 3.75s/it] {'loss': 1.8149, 'grad_norm': 0.0009581841070023891, 'learning_rate': 0.023432694224790735, 'epoch': 0.9}
+ 90%|█████████ | 469/520 [29:52<03:12, 3.77s/it] {'loss': 1.8479, 'grad_norm': 0.0007947981088595458, 'learning_rate': 0.0225476339972193, 'epoch': 0.9}
+ 90%|█████████ | 470/520 [29:56<03:08, 3.77s/it] {'loss': 1.6739, 'grad_norm': 0.0008236588395005643, 'learning_rate': 0.02167918206240494, 'epoch': 0.9}
+ 91%|█████████ | 471/520 [29:59<03:04, 3.76s/it] {'loss': 1.7656, 'grad_norm': 0.000842267771013589, 'learning_rate': 0.02082737216329793, 'epoch': 0.91}
+ 91%|█████████ | 472/520 [30:03<03:01, 3.79s/it] {'loss': 1.6558, 'grad_norm': 0.0009490164813229398, 'learning_rate': 0.019992237396236647, 'epoch': 0.91}
+ 91%|█████████ | 473/520 [30:07<02:57, 3.78s/it] {'loss': 1.7265, 'grad_norm': 0.0008937927730622853, 'learning_rate': 0.019173810209661867, 'epoch': 0.91}
+ 91%|█████████ | 474/520 [30:11<02:53, 3.77s/it] {'loss': 2.0206, 'grad_norm': 0.0009233161639057053, 'learning_rate': 0.018372122402855507, 'epoch': 0.91}
+ 91%|█████████▏| 475/520 [30:15<02:50, 3.78s/it] {'loss': 1.8923, 'grad_norm': 0.0009168327195557769, 'learning_rate': 0.01758720512470523, 'epoch': 0.91}
+ 92%|█████████▏| 476/520 [30:18<02:45, 3.76s/it] {'loss': 1.701, 'grad_norm': 0.0008524247764342699, 'learning_rate': 0.016819088872494586, 'epoch': 0.92}
+ 92%|█████████▏| 477/520 [30:22<02:41, 3.75s/it] {'loss': 1.7043, 'grad_norm': 0.0009682406974496367, 'learning_rate': 0.016067803490717552, 'epoch': 0.92}
+ 92%|█████████▏| 478/520 [30:26<02:37, 3.74s/it] {'loss': 1.6502, 'grad_norm': 0.0007866497379091586, 'learning_rate': 0.01533337816991931, 'epoch': 0.92}
+ 92%|█████████▏| 479/520 [30:29<02:32, 3.72s/it] {'loss': 2.0639, 'grad_norm': 0.0009307299240267963, 'learning_rate': 0.01461584144556175, 'epoch': 0.92}
+ 92%|█████████▏| 480/520 [30:33<02:29, 3.73s/it] {'loss': 2.0118, 'grad_norm': 0.0008408766382707956, 'learning_rate': 0.013915221196914967, 'epoch': 0.92}
+ 92%|█████████▎| 481/520 [30:37<02:25, 3.74s/it] {'loss': 2.0516, 'grad_norm': 0.0008388243305695658, 'learning_rate': 0.013231544645974069, 'epoch': 0.93}
+ 93%|█████████▎| 482/520 [30:41<02:21, 3.73s/it] {'loss': 2.0949, 'grad_norm': 0.0009122323862556244, 'learning_rate': 0.012564838356401476, 'epoch': 0.93}
+ 93%|█████████▎| 483/520 [30:44<02:18, 3.74s/it] {'loss': 1.8026, 'grad_norm': 0.0008151813767680393, 'learning_rate': 0.011915128232494493, 'epoch': 0.93}
+ 93%|█████████▎| 484/520 [30:48<02:14, 3.74s/it] {'loss': 1.7535, 'grad_norm': 0.0008448491560845379, 'learning_rate': 0.011282439518179371, 'epoch': 0.93}
+ 93%|█████████▎| 485/520 [30:52<02:11, 3.75s/it] {'loss': 1.6881, 'grad_norm': 0.000796196292718411, 'learning_rate': 0.010666796796029988, 'epoch': 0.93}
+ 93%|█████████▎| 486/520 [30:56<02:06, 3.73s/it] {'loss': 1.8274, 'grad_norm': 0.0007263860836042314, 'learning_rate': 0.010068223986312958, 'epoch': 0.93}
+ 94%|█████████▎| 487/520 [30:59<02:03, 3.74s/it] {'loss': 1.6435, 'grad_norm': 0.0007473459517210812, 'learning_rate': 0.009486744346058234, 'epoch': 0.94}
+ 94%|█████████▍| 488/520 [31:03<02:00, 3.77s/it] {'loss': 1.6217, 'grad_norm': 0.0008952074886668031, 'learning_rate': 0.008922380468155277, 'epoch': 0.94}
+ 94%|█████████▍| 489/520 [31:07<01:58, 3.81s/it] {'loss': 1.9807, 'grad_norm': 0.0008206995801214657, 'learning_rate': 0.008375154280475555, 'epoch': 0.94}
+ 94%|█████████▍| 490/520 [31:11<01:54, 3.83s/it] {'loss': 1.7561, 'grad_norm': 0.0008528028759844991, 'learning_rate': 0.007845087045020278, 'epoch': 0.94}
+ 94%|█████████▍| 491/520 [31:15<01:50, 3.81s/it] {'loss': 1.6733, 'grad_norm': 0.0008267488466292067, 'learning_rate': 0.007332199357094405, 'epoch': 0.94}
+ 95%|█████████▍| 492/520 [31:18<01:45, 3.77s/it] {'loss': 1.8738, 'grad_norm': 0.0009230978149457824, 'learning_rate': 0.006836511144506391, 'epoch': 0.95}
+ 95%|█████████▍| 493/520 [31:22<01:42, 3.79s/it] {'loss': 2.1548, 'grad_norm': 0.001046558530639326, 'learning_rate': 0.006358041666793851, 'epoch': 0.95}
+ 95%|█████████▌| 494/520 [31:26<01:39, 3.83s/it] {'loss': 1.7534, 'grad_norm': 0.0007236758506046233, 'learning_rate': 0.005896809514475509, 'epoch': 0.95}
+ 95%|█████████▌| 495/520 [31:30<01:36, 3.85s/it] {'loss': 1.7078, 'grad_norm': 0.0007751175706428776, 'learning_rate': 0.0054528326083283785, 'epoch': 0.95}
+ 95%|█████████▌| 496/520 [31:34<01:33, 3.88s/it] {'loss': 1.6412, 'grad_norm': 0.0009154088871730396, 'learning_rate': 0.005026128198692165, 'epoch': 0.95}
+ 96%|█████████▌| 497/520 [31:38<01:29, 3.88s/it] {'loss': 1.9541, 'grad_norm': 0.0009179609185565586, 'learning_rate': 0.004616712864798306, 'epoch': 0.96}
+ 96%|█████████▌| 498/520 [31:42<01:25, 3.88s/it] {'loss': 1.6938, 'grad_norm': 0.0007696618086793558, 'learning_rate': 0.0042246025141262356, 'epoch': 0.96}
+ 96%|█████████▌| 499/520 [31:46<01:21, 3.89s/it] {'loss': 2.1207, 'grad_norm': 0.00081418037698645, 'learning_rate': 0.003849812381785328, 'epoch': 0.96}
+ 96%|█████████▌| 500/520 [31:50<01:17, 3.89s/it] {'loss': 1.8893, 'grad_norm': 0.0009285240751530511, 'learning_rate': 0.0034923570299225716, 'epoch': 0.96}
+ 96%|█████████▋| 501/520 [31:54<01:13, 3.89s/it] {'loss': 2.0464, 'grad_norm': 0.0009680103134524089, 'learning_rate': 0.0031522503471571707, 'epoch': 0.96}
+ 97%|█████████▋| 502/520 [31:57<01:09, 3.85s/it] {'loss': 1.7773, 'grad_norm': 0.0008111754362988019, 'learning_rate': 0.0028295055480408284, 'epoch': 0.97}
+ 97%|█████████▋| 503/520 [32:01<01:04, 3.82s/it] {'loss': 2.0179, 'grad_norm': 0.0008621852628351626, 'learning_rate': 0.0025241351725441064, 'epoch': 0.97}
+ 97%|█████████▋| 504/520 [32:05<01:00, 3.79s/it] {'loss': 1.8184, 'grad_norm': 0.0010459827056927106, 'learning_rate': 0.002236151085569366, 'epoch': 0.97}
+ 97%|█████████▋| 505/520 [32:09<00:56, 3.77s/it] {'loss': 1.8144, 'grad_norm': 0.0008417950833307422, 'learning_rate': 0.001965564476489784, 'epoch': 0.97}
+ 97%|█████████▋| 506/520 [32:12<00:52, 3.76s/it] {'loss': 1.6864, 'grad_norm': 0.0008783873264102175, 'learning_rate': 0.0017123858587145047, 'epoch': 0.97}
+ 98%|█████████▊| 507/520 [32:16<00:49, 3.77s/it] {'loss': 2.1809, 'grad_norm': 0.0008807742541716485, 'learning_rate': 0.001476625069280213, 'epoch': 0.97}
+ 98%|█████████▊| 508/520 [32:20<00:44, 3.74s/it] {'loss': 1.833, 'grad_norm': 0.0008114153893531413, 'learning_rate': 0.0012582912684689419, 'epoch': 0.98}
+ 98%|█████████▊| 509/520 [32:23<00:41, 3.73s/it] {'loss': 1.7797, 'grad_norm': 0.0007746832155004857, 'learning_rate': 0.0010573929394520065, 'epoch': 0.98}
+ 98%|█████████▊| 510/520 [32:27<00:37, 3.71s/it] {'loss': 1.7285, 'grad_norm': 0.0007955100034416965, 'learning_rate': 0.0008739378879606685, 'epoch': 0.98}
+ 98%|█████████▊| 511/520 [32:31<00:33, 3.70s/it] {'loss': 1.6903, 'grad_norm': 0.0008367217810360485, 'learning_rate': 0.000707933241982528, 'epoch': 0.98}
+ 98%|█████████▊| 512/520 [32:35<00:29, 3.72s/it] {'loss': 1.5873, 'grad_norm': 0.0009041398367404913, 'learning_rate': 0.000559385451484945, 'epoch': 0.98}
+ 99%|█████████▊| 513/520 [32:38<00:26, 3.73s/it] {'loss': 1.8562, 'grad_norm': 0.0009524990052913782, 'learning_rate': 0.0004283002881639908, 'epoch': 0.99}
+ 99%|█████████▉| 514/520 [32:42<00:22, 3.73s/it] {'loss': 1.7681, 'grad_norm': 0.0007587254261730617, 'learning_rate': 0.0003146828452206263, 'epoch': 0.99}
+ 99%|█████████▉| 515/520 [32:46<00:18, 3.72s/it] {'loss': 1.8891, 'grad_norm': 0.0008977281727636225, 'learning_rate': 0.00021853753716256086, 'epoch': 0.99}
+ 99%|█████████▉| 516/520 [32:49<00:14, 3.73s/it] {'loss': 1.7155, 'grad_norm': 0.0008047569217878386, 'learning_rate': 0.00013986809963268955, 'epoch': 0.99}
+ 99%|█████████▉| 517/520 [32:53<00:11, 3.71s/it] {'loss': 2.0904, 'grad_norm': 0.0009823104635236898, 'learning_rate': 7.867758926410895e-05, 'epoch': 0.99}
+100%|█████████▉| 518/520 [32:57<00:07, 3.70s/it] {'loss': 1.7683, 'grad_norm': 0.0008509259344367014, 'learning_rate': 3.496838356131238e-05, 'epoch': 1.0}
+100%|█████████▉| 519/520 [33:00<00:03, 3.69s/it] {'loss': 2.0357, 'grad_norm': 0.0008846080489275831, 'learning_rate': 8.742180807813637e-06, 'epoch': 1.0}
+100%|██████████| 520/520 [33:05<00:00, 3.92s/it] {'loss': 2.209, 'grad_norm': 0.0009798587760858634, 'learning_rate': 0.0, 'epoch': 1.0}
+100%|██████████| 520/520 [33:05<00:00, 3.92s/it] {'train_runtime': 1985.46, 'train_samples_per_second': 33.508, 'train_steps_per_second': 0.262, 'train_loss': 2.337684589395156, 'epoch': 1.0}
+100%|██████████| 520/520 [33:05<00:00, 3.82s/it]
+[2025-10-09 10:56:54,488] [INFO] [launch.py:348:main] Process 1158268 exits successfully.
+[2025-10-09 10:56:54,488] [INFO] [launch.py:348:main] Process 1158267 exits successfully.
+[2025-10-09 10:56:54,489] [INFO] [launch.py:348:main] Process 1158266 exits successfully.
+[2025-10-09 10:56:54,489] [INFO] [launch.py:348:main] Process 1158265 exits successfully.
+[2025-10-09 10:56:55,490] [INFO] [launch.py:348:main] Process 1158264 exits successfully.
+[2025-10-09 10:56:55,491] [INFO] [launch.py:348:main] Process 1158269 exits successfully.
+[2025-10-09 10:56:55,491] [INFO] [launch.py:348:main] Process 1158263 exits successfully.
+[2025-10-09 10:56:58,495] [INFO] [launch.py:348:main] Process 1158262 exits successfully.
+==== EXPERIMENT COMPLETED: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation ====
+Log File: qwen2.5-0_5b_base_masktune_42_llm-connector_text-3.0_0.5_9e-1_connector-3.0_0.5_9e-1_ablation_20251009_102218.log
+Timestamp: 2025-10-09 10:57:01
+=====================================